Commit d45e5ac5eb

Andrew Kelley <andrew@ziglang.org>
2024-12-07 23:14:48
wasm codegen: rename func: CodeGen to cg: CodeGen
1 parent 4a1447d
Changed files (1)
src/arch/wasm/CodeGen.zig
@@ -700,22 +700,20 @@ const InnerError = error{
     Overflow,
 } || link.File.UpdateDebugInfoError;
 
-pub fn deinit(func: *CodeGen) void {
-    // in case of an error and we still have branches
-    for (func.branches.items) |*branch| {
-        branch.deinit(func.gpa);
-    }
-    func.branches.deinit(func.gpa);
-    func.blocks.deinit(func.gpa);
-    func.loops.deinit(func.gpa);
-    func.locals.deinit(func.gpa);
-    func.simd_immediates.deinit(func.gpa);
-    func.free_locals_i32.deinit(func.gpa);
-    func.free_locals_i64.deinit(func.gpa);
-    func.free_locals_f32.deinit(func.gpa);
-    func.free_locals_f64.deinit(func.gpa);
-    func.free_locals_v128.deinit(func.gpa);
-    func.* = undefined;
+pub fn deinit(cg: *CodeGen) void {
+    const gpa = cg.gpa;
+    for (cg.branches.items) |*branch| branch.deinit(gpa);
+    cg.branches.deinit(gpa);
+    cg.blocks.deinit(gpa);
+    cg.loops.deinit(gpa);
+    cg.locals.deinit(gpa);
+    cg.simd_immediates.deinit(gpa);
+    cg.free_locals_i32.deinit(gpa);
+    cg.free_locals_i64.deinit(gpa);
+    cg.free_locals_f32.deinit(gpa);
+    cg.free_locals_f64.deinit(gpa);
+    cg.free_locals_v128.deinit(gpa);
+    cg.* = undefined;
 }
 
 fn fail(cg: *CodeGen, comptime fmt: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@@ -726,10 +724,10 @@ fn fail(cg: *CodeGen, comptime fmt: []const u8, args: anytype) error{ OutOfMemor
 
 /// Resolves the `WValue` for the given instruction `ref`.
 /// When the given instruction has a `Value`, it returns a constant instead.
-fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
-    var branch_index = func.branches.items.len;
+fn resolveInst(cg: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
+    var branch_index = cg.branches.items.len;
     while (branch_index > 0) : (branch_index -= 1) {
-        const branch = func.branches.items[branch_index - 1];
+        const branch = cg.branches.items[branch_index - 1];
         if (branch.values.get(ref)) |value| {
             return value;
         }
@@ -739,13 +737,13 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
     // means we must generate it from a constant.
     // We always store constants in the most outer branch as they must never
     // be removed. The most outer branch is always at index 0.
-    const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
+    const gop = try cg.branches.items[0].values.getOrPut(cg.gpa, ref);
     assert(!gop.found_existing);
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const val = (try func.air.value(ref, pt)).?;
-    const ty = func.typeOf(ref);
+    const val = (try cg.air.value(ref, pt)).?;
+    const ty = cg.typeOf(ref);
     if (!ty.hasRuntimeBitsIgnoreComptime(zcu) and !ty.isInt(zcu) and !ty.isError(zcu)) {
         gop.value_ptr.* = .none;
         return .none;
@@ -757,24 +755,24 @@ fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
     //
     // In the other cases, we will simply lower the constant to a value that fits
     // into a single local (such as a pointer, integer, bool, etc).
-    const result: WValue = if (isByRef(ty, pt, func.target))
+    const result: WValue = if (isByRef(ty, pt, cg.target))
         .{ .uav_ref = .{ .ip_index = val.toIntern() } }
     else
-        try func.lowerConstant(val, ty);
+        try cg.lowerConstant(val, ty);
 
     gop.value_ptr.* = result;
     return result;
 }
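
Aside: constants are cached in `branches.items[0]` because the outermost branch
outlives every inner branch, so a cached constant stays resolvable after inner
branches are popped. A minimal runnable sketch of that invariant, using plain
`u32` keys in place of `Air.Inst.Ref` (illustrative only, not compiler code):

    const std = @import("std");

    test "outermost branch outlives inner branches (illustrative)" {
        const gpa = std.testing.allocator;
        var branches: std.ArrayListUnmanaged(std.AutoHashMapUnmanaged(u32, u32)) = .{};
        defer {
            for (branches.items) |*b| b.deinit(gpa);
            branches.deinit(gpa);
        }
        try branches.append(gpa, .{}); // outer branch, always index 0
        try branches.append(gpa, .{}); // inner branch
        // a "constant" resolved while inside the inner branch is cached at index 0
        try branches.items[0].put(gpa, 42, 7);
        var inner = branches.pop(); // inner branch ends
        inner.deinit(gpa);
        try std.testing.expectEqual(@as(?u32, 7), branches.items[0].get(42));
    }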
 
 /// NOTE: if result == .stack, it will be stored in .local
-fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
+fn finishAir(cg: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) InnerError!void {
     assert(operands.len <= Liveness.bpi - 1);
-    var tomb_bits = func.liveness.getTombBits(inst);
+    var tomb_bits = cg.liveness.getTombBits(inst);
     for (operands) |operand| {
         const dies = @as(u1, @truncate(tomb_bits)) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
-        processDeath(func, operand);
+        processDeath(cg, operand);
     }
 
     // results of `none` can never be referenced.
@@ -782,13 +780,13 @@ fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []c
         const trackable_result = if (result != .stack)
             result
         else
-            try result.toLocal(func, func.typeOfIndex(inst));
-        const branch = func.currentBranch();
+            try result.toLocal(cg, cg.typeOfIndex(inst));
+        const branch = cg.currentBranch();
         branch.values.putAssumeCapacityNoClobber(inst.toRef(), trackable_result);
     }
 
     if (std.debug.runtime_safety) {
-        func.air_bookkeeping += 1;
+        cg.air_bookkeeping += 1;
     }
 }
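
The loop above consumes one death bit per operand, least significant bit first,
for up to `Liveness.bpi - 1` operands. An illustrative test of that
bit-twiddling, assuming `bpi == 4` (i.e. a `u4` of tomb bits):

    const std = @import("std");

    test "tomb bit consumption (illustrative, assumes 4 bits per instruction)" {
        var tomb_bits: u4 = 0b0101; // LSB-first death bits for up to 3 operands
        var dies: [3]bool = undefined;
        for (&dies) |*d| {
            d.* = @as(u1, @truncate(tomb_bits)) != 0;
            tomb_bits >>= 1;
        }
        try std.testing.expectEqualSlices(bool, &.{ true, false, true }, &dies);
    }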
 
@@ -801,8 +799,8 @@ const Branch = struct {
     }
 };
 
-inline fn currentBranch(func: *CodeGen) *Branch {
-    return &func.branches.items[func.branches.items.len - 1];
+inline fn currentBranch(cg: *CodeGen) *Branch {
+    return &cg.branches.items[cg.branches.items.len - 1];
 }
 
 const BigTomb = struct {
@@ -829,126 +827,126 @@ const BigTomb = struct {
     }
 };
 
-fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
-    try func.currentBranch().values.ensureUnusedCapacity(func.gpa, operand_count + 1);
+fn iterateBigTomb(cg: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
+    try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, operand_count + 1);
     return BigTomb{
-        .gen = func,
+        .gen = cg,
         .inst = inst,
-        .lbt = func.liveness.iterateBigTomb(inst),
+        .lbt = cg.liveness.iterateBigTomb(inst),
     };
 }
 
-fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void {
+fn processDeath(cg: *CodeGen, ref: Air.Inst.Ref) void {
     if (ref.toIndex() == null) return;
     // Branches are currently only allowed to free locals allocated
     // within their own branch.
     // TODO: Upon branch consolidation free any locals if needed.
-    const value = func.currentBranch().values.getPtr(ref) orelse return;
+    const value = cg.currentBranch().values.getPtr(ref) orelse return;
     if (value.* != .local) return;
-    const reserved_indexes = func.args.len + @intFromBool(func.return_value != .none);
+    const reserved_indexes = cg.args.len + @intFromBool(cg.return_value != .none);
     if (value.local.value < reserved_indexes) {
         return; // function arguments can never be re-used
     }
     log.debug("Decreasing reference for ref: %{d}, using local '{d}'", .{ @intFromEnum(ref.toIndex().?), value.local.value });
     value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer
     if (value.local.references == 0) {
-        value.free(func);
+        value.free(cg);
     }
 }
 
 /// Appends a MIR instruction to the list of instructions.
-fn addInst(func: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void {
-    try func.mir_instructions.append(func.gpa, inst);
+fn addInst(cg: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void {
+    try cg.mir_instructions.append(cg.gpa, inst);
 }
 
-fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
-    try func.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
+fn addTag(cg: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
+    try cg.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
 }
 
-fn addExtended(func: *CodeGen, opcode: std.wasm.MiscOpcode) error{OutOfMemory}!void {
-    const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
-    try func.mir_extra.append(func.gpa, @intFromEnum(opcode));
-    try func.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
+fn addExtended(cg: *CodeGen, opcode: std.wasm.MiscOpcode) error{OutOfMemory}!void {
+    const extra_index = @as(u32, @intCast(cg.mir_extra.items.len));
+    try cg.mir_extra.append(cg.gpa, @intFromEnum(opcode));
+    try cg.addInst(.{ .tag = .misc_prefix, .data = .{ .payload = extra_index } });
 }
 
-fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
-    try func.addInst(.{ .tag = tag, .data = .{ .label = label } });
+fn addLabel(cg: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
+    try cg.addInst(.{ .tag = tag, .data = .{ .label = label } });
 }
 
-fn addIpIndex(func: *CodeGen, tag: Mir.Inst.Tag, i: InternPool.Index) Allocator.Error!void {
-    try func.addInst(.{ .tag = tag, .data = .{ .ip_index = i } });
+fn addIpIndex(cg: *CodeGen, tag: Mir.Inst.Tag, i: InternPool.Index) Allocator.Error!void {
+    try cg.addInst(.{ .tag = tag, .data = .{ .ip_index = i } });
 }
 
-fn addNav(func: *CodeGen, tag: Mir.Inst.Tag, i: InternPool.Nav.Index) Allocator.Error!void {
-    try func.addInst(.{ .tag = tag, .data = .{ .nav_index = i } });
+fn addNav(cg: *CodeGen, tag: Mir.Inst.Tag, i: InternPool.Nav.Index) Allocator.Error!void {
+    try cg.addInst(.{ .tag = tag, .data = .{ .nav_index = i } });
 }
 
 /// Accepts an unsigned 32bit integer rather than a signed integer to
 /// prevent us from having to bitcast multiple times as most values
 /// within codegen are represented as unsigned rather than signed.
-fn addImm32(func: *CodeGen, imm: u32) error{OutOfMemory}!void {
-    try func.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } });
+fn addImm32(cg: *CodeGen, imm: u32) error{OutOfMemory}!void {
+    try cg.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = @bitCast(imm) } });
 }
 
 /// Accepts an unsigned 64bit integer rather than a signed integer to
 /// prevent us from having to bitcast multiple times as most values
 /// within codegen are represented as unsigned rather than signed.
-fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void {
-    const extra_index = try func.addExtra(Mir.Imm64.init(imm));
-    try func.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
+fn addImm64(cg: *CodeGen, imm: u64) error{OutOfMemory}!void {
+    const extra_index = try cg.addExtra(Mir.Imm64.init(imm));
+    try cg.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
 }
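
Both helpers take the raw bits as unsigned; a caller holding a signed constant
is expected to `@bitCast` once at the boundary. A hypothetical call-site
sketch of what would be handed to `addImm32`:

    const std = @import("std");

    test "signed immediates travel as raw unsigned bits (illustrative)" {
        const signed: i32 = -1;
        const imm: u32 = @bitCast(signed); // what a caller would pass to addImm32
        try std.testing.expectEqual(@as(u32, 0xFFFF_FFFF), imm);
    }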
 
 /// Accepts the index into the list of 128-bit immediates
-fn addImm128(func: *CodeGen, index: u32) error{OutOfMemory}!void {
-    const simd_values = func.simd_immediates.items[index];
-    const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+fn addImm128(cg: *CodeGen, index: u32) error{OutOfMemory}!void {
+    const simd_values = cg.simd_immediates.items[index];
+    const extra_index = @as(u32, @intCast(cg.mir_extra.items.len));
     // tag + 128bit value
-    try func.mir_extra.ensureUnusedCapacity(func.gpa, 5);
-    func.mir_extra.appendAssumeCapacity(@intFromEnum(std.wasm.SimdOpcode.v128_const));
-    func.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
-    try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+    try cg.mir_extra.ensureUnusedCapacity(cg.gpa, 5);
+    cg.mir_extra.appendAssumeCapacity(@intFromEnum(std.wasm.SimdOpcode.v128_const));
+    cg.mir_extra.appendSliceAssumeCapacity(@alignCast(mem.bytesAsSlice(u32, &simd_values)));
+    try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
 }
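
The reserved capacity of 5 comes from one `u32` for the SIMD opcode tag plus
128/32 = 4 `u32` words for the immediate itself; a small check of that
arithmetic (illustrative only):

    const std = @import("std");

    test "a v128 immediate occupies 1 tag word + 4 value words (illustrative)" {
        const simd_value = [_]u8{0xAA} ** 16;
        const words = std.mem.bytesAsSlice(u32, &simd_value);
        try std.testing.expectEqual(@as(usize, 4), words.len);
        try std.testing.expectEqual(@as(usize, 5), 1 + words.len);
    }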
 
-fn addFloat64(func: *CodeGen, float: f64) error{OutOfMemory}!void {
-    const extra_index = try func.addExtra(Mir.Float64.init(float));
-    try func.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
+fn addFloat64(cg: *CodeGen, float: f64) error{OutOfMemory}!void {
+    const extra_index = try cg.addExtra(Mir.Float64.init(float));
+    try cg.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
 }
 
 /// Inserts an instruction to load/store from/to wasm's linear memory dependent on the given `tag`.
-fn addMemArg(func: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
-    const extra_index = try func.addExtra(mem_arg);
-    try func.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } });
+fn addMemArg(cg: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
+    const extra_index = try cg.addExtra(mem_arg);
+    try cg.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } });
 }
 
 /// Inserts an instruction from the 'atomics' feature which accesses wasm's linear memory dependent on the
 /// given `tag`.
-fn addAtomicMemArg(func: *CodeGen, tag: std.wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
-    const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
-    _ = try func.addExtra(mem_arg);
-    try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
+fn addAtomicMemArg(cg: *CodeGen, tag: std.wasm.AtomicsOpcode, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
+    const extra_index = try cg.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
+    _ = try cg.addExtra(mem_arg);
+    try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
 }
 
 /// Helper function to emit atomic mir opcodes.
-fn addAtomicTag(func: *CodeGen, tag: std.wasm.AtomicsOpcode) error{OutOfMemory}!void {
-    const extra_index = try func.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
-    try func.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
+fn addAtomicTag(cg: *CodeGen, tag: std.wasm.AtomicsOpcode) error{OutOfMemory}!void {
+    const extra_index = try cg.addExtra(@as(struct { val: u32 }, .{ .val = @intFromEnum(tag) }));
+    try cg.addInst(.{ .tag = .atomics_prefix, .data = .{ .payload = extra_index } });
 }
 
 /// Appends entries to `mir_extra` based on the type of `extra`.
 /// Returns the index into `mir_extra`
-fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
+fn addExtra(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len);
-    return func.addExtraAssumeCapacity(extra);
+    try cg.mir_extra.ensureUnusedCapacity(cg.gpa, fields.len);
+    return cg.addExtraAssumeCapacity(extra);
 }
 
 /// Appends entries to `mir_extra` based on the type of `extra`.
 /// Returns the index into `mir_extra`
-fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
+fn addExtraAssumeCapacity(cg: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @as(u32, @intCast(func.mir_extra.items.len));
+    const result = @as(u32, @intCast(cg.mir_extra.items.len));
     inline for (fields) |field| {
-        func.mir_extra.appendAssumeCapacity(switch (field.type) {
+        cg.mir_extra.appendAssumeCapacity(switch (field.type) {
             u32 => @field(extra, field.name),
             i32 => @bitCast(@field(extra, field.name)),
             InternPool.Index => @intFromEnum(@field(extra, field.name)),
@@ -1015,24 +1013,24 @@ fn genBlockType(ty: Type, pt: Zcu.PerThread, target: *const std.Target) u8 {
 }
 
 /// Writes the bytecode depending on the given `WValue` in `val`
-fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
+fn emitWValue(cg: *CodeGen, value: WValue) InnerError!void {
     switch (value) {
         .dead => unreachable, // reference to free'd `WValue` (missing reuseOperand?)
         .none, .stack => {}, // no-op
-        .local => |idx| try func.addLabel(.local_get, idx.value),
-        .imm32 => |val| try func.addImm32(val),
-        .imm64 => |val| try func.addImm64(val),
-        .imm128 => |val| try func.addImm128(val),
-        .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
-        .float64 => |val| try func.addFloat64(val),
+        .local => |idx| try cg.addLabel(.local_get, idx.value),
+        .imm32 => |val| try cg.addImm32(val),
+        .imm64 => |val| try cg.addImm64(val),
+        .imm128 => |val| try cg.addImm128(val),
+        .float32 => |val| try cg.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
+        .float64 => |val| try cg.addFloat64(val),
         .nav_ref => |nav_ref| {
             if (nav_ref.offset == 0) {
-                try func.addInst(.{ .tag = .nav_ref, .data = .{ .nav_index = nav_ref.nav_index } });
+                try cg.addInst(.{ .tag = .nav_ref, .data = .{ .nav_index = nav_ref.nav_index } });
             } else {
-                try func.addInst(.{
+                try cg.addInst(.{
                     .tag = .nav_ref_off,
                     .data = .{
-                        .payload = try func.addExtra(Mir.NavRefOff{
+                        .payload = try cg.addExtra(Mir.NavRefOff{
                             .nav_index = nav_ref.nav_index,
                             .offset = nav_ref.offset,
                         }),
@@ -1042,12 +1040,12 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
         },
         .uav_ref => |uav| {
             if (uav.offset == 0) {
-                try func.addInst(.{ .tag = .uav_ref, .data = .{ .ip_index = uav.ip_index } });
+                try cg.addInst(.{ .tag = .uav_ref, .data = .{ .ip_index = uav.ip_index } });
             } else {
-                try func.addInst(.{
+                try cg.addInst(.{
                     .tag = .uav_ref_off,
                     .data = .{
-                        .payload = try func.addExtra(Mir.UavRefOff{
+                        .payload = try cg.addExtra(Mir.UavRefOff{
                             .ip_index = uav.ip_index,
                             .offset = uav.offset,
                         }),
@@ -1055,7 +1053,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
                 });
             }
         },
-        .stack_offset => try func.addLabel(.local_get, func.bottom_stack_value.local.value), // caller must ensure to address the offset
+        .stack_offset => try cg.addLabel(.local_get, cg.bottom_stack_value.local.value), // caller must ensure to address the offset
     }
 }
 
@@ -1063,7 +1061,7 @@ fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
 /// The old `WValue` found at instruction `ref` is then replaced by the
 /// modified `WValue` and returned. When given a non-local or non-stack-offset,
 /// returns the given `operand` itself instead.
-fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
+fn reuseOperand(cg: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
     if (operand != .local and operand != .stack_offset) return operand;
     var new_value = operand;
     switch (new_value) {
@@ -1071,17 +1069,17 @@ fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
         .stack_offset => |*stack_offset| stack_offset.references += 1,
         else => unreachable,
     }
-    const old_value = func.getResolvedInst(ref);
+    const old_value = cg.getResolvedInst(ref);
     old_value.* = new_value;
     return new_value;
 }
 
 /// From a reference, returns its resolved `WValue`.
 /// It's illegal to provide an `Air.Inst.Ref` that hasn't been resolved yet.
-fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
-    var index = func.branches.items.len;
+fn getResolvedInst(cg: *CodeGen, ref: Air.Inst.Ref) *WValue {
+    var index = cg.branches.items.len;
     while (index > 0) : (index -= 1) {
-        const branch = func.branches.items[index - 1];
+        const branch = cg.branches.items[index - 1];
         if (branch.values.getPtr(ref)) |value| {
             return value;
         }
@@ -1091,31 +1089,31 @@ fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
 
 /// Creates one local for a given `Type`.
 /// Returns a corresponding `WValue` with `local` as the active tag.
-fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
-    const pt = func.pt;
-    const valtype = typeToValtype(ty, pt, func.target);
+fn allocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const pt = cg.pt;
+    const valtype = typeToValtype(ty, pt, cg.target);
     const index_or_null = switch (valtype) {
-        .i32 => func.free_locals_i32.popOrNull(),
-        .i64 => func.free_locals_i64.popOrNull(),
-        .f32 => func.free_locals_f32.popOrNull(),
-        .f64 => func.free_locals_f64.popOrNull(),
-        .v128 => func.free_locals_v128.popOrNull(),
+        .i32 => cg.free_locals_i32.popOrNull(),
+        .i64 => cg.free_locals_i64.popOrNull(),
+        .f32 => cg.free_locals_f32.popOrNull(),
+        .f64 => cg.free_locals_f64.popOrNull(),
+        .v128 => cg.free_locals_v128.popOrNull(),
     };
     if (index_or_null) |index| {
         log.debug("reusing local ({d}) of type {}", .{ index, valtype });
         return .{ .local = .{ .value = index, .references = 1 } };
     }
     log.debug("new local of type {}", .{valtype});
-    return func.ensureAllocLocal(ty);
+    return cg.ensureAllocLocal(ty);
 }
 
 /// Ensures a new local will be created. This is useful when a
 /// zero-initialized local is needed.
-fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
-    const pt = func.pt;
-    try func.locals.append(func.gpa, genValtype(ty, pt, func.target));
-    const initial_index = func.local_index;
-    func.local_index += 1;
+fn ensureAllocLocal(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const pt = cg.pt;
+    try cg.locals.append(cg.gpa, genValtype(ty, pt, cg.target));
+    const initial_index = cg.local_index;
+    cg.local_index += 1;
     return .{ .local = .{ .value = initial_index, .references = 1 } };
 }
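
`allocLocal` pops from a per-valtype free list and only falls back to
`ensureAllocLocal`, which always declares a fresh local. A minimal sketch of
the reuse strategy (assumed simplification: one list instead of the five kept
by the real code):

    const std = @import("std");

    test "freed locals are reused before new ones are declared (illustrative)" {
        const gpa = std.testing.allocator;
        var free_i32: std.ArrayListUnmanaged(u32) = .{};
        defer free_i32.deinit(gpa);
        var local_index: u32 = 0;

        // first allocation: free list is empty, declare local 0
        const a = free_i32.popOrNull() orelse blk: {
            defer local_index += 1;
            break :blk local_index;
        };
        try free_i32.append(gpa, a); // the local dies and is returned to the list
        // second allocation: local 0 comes back from the free list
        const b = free_i32.popOrNull() orelse unreachable;
        try std.testing.expectEqual(a, b);
        try std.testing.expectEqual(@as(u32, 1), local_index); // no new local declared
    }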
 
@@ -1291,10 +1289,10 @@ pub fn function(
 ) Error!Function {
     const zcu = pt.zcu;
     const gpa = zcu.gpa;
-    const func = zcu.funcInfo(func_index);
-    const file_scope = zcu.navFileScope(func.owner_nav);
+    const cg = zcu.funcInfo(func_index);
+    const file_scope = zcu.navFileScope(cg.owner_nav);
     const target = &file_scope.mod.resolved_target.result;
-    const fn_ty = zcu.navValue(func.owner_nav).typeOf(zcu);
+    const fn_ty = zcu.navValue(cg.owner_nav).typeOf(zcu);
     const fn_info = zcu.typeToFunc(fn_ty).?;
     const ip = &zcu.intern_pool;
     const fn_ty_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, target);
@@ -1309,7 +1307,7 @@ pub fn function(
         .pt = pt,
         .air = air,
         .liveness = liveness,
-        .owner_nav = func.owner_nav,
+        .owner_nav = cg.owner_nav,
         .target = target,
         .ptr_size = switch (target.cpu.arch) {
             .wasm32 => .wasm32,
@@ -1470,89 +1468,89 @@ fn firstParamSRet(
 
 /// Lowers a Zig type and its value based on a given calling convention to ensure
 /// it matches the ABI.
-fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
+fn lowerArg(cg: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
     if (cc != .wasm_watc) {
-        return func.lowerToStack(value);
+        return cg.lowerToStack(value);
     }
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ty_classes = abi.classifyType(ty, zcu);
     assert(ty_classes[0] != .none);
     switch (ty.zigTypeTag(zcu)) {
         .@"struct", .@"union" => {
             if (ty_classes[0] == .indirect) {
-                return func.lowerToStack(value);
+                return cg.lowerToStack(value);
             }
             assert(ty_classes[0] == .direct);
             const scalar_type = abi.scalarType(ty, zcu);
             switch (value) {
-                .nav_ref, .stack_offset => _ = try func.load(value, scalar_type, 0),
+                .nav_ref, .stack_offset => _ = try cg.load(value, scalar_type, 0),
                 .dead => unreachable,
-                else => try func.emitWValue(value),
+                else => try cg.emitWValue(value),
             }
         },
         .int, .float => {
             if (ty_classes[1] == .none) {
-                return func.lowerToStack(value);
+                return cg.lowerToStack(value);
             }
             assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
             assert(ty.abiSize(zcu) == 16);
             // in this case we have an integer or float that must be lowered as 2 i64's.
-            try func.emitWValue(value);
-            try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
-            try func.emitWValue(value);
-            try func.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 });
+            try cg.emitWValue(value);
+            try cg.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
+            try cg.emitWValue(value);
+            try cg.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 });
         },
-        else => return func.lowerToStack(value),
+        else => return cg.lowerToStack(value),
     }
 }
 
 /// Lowers a `WValue` to the stack. This means that when `value` is a
 /// `.stack_offset`, we compute the pointer from this offset and use that.
 /// The value is left on the stack, and not stored in any temporary.
-fn lowerToStack(func: *CodeGen, value: WValue) !void {
+fn lowerToStack(cg: *CodeGen, value: WValue) !void {
     switch (value) {
         .stack_offset => |offset| {
-            try func.emitWValue(value);
+            try cg.emitWValue(value);
             if (offset.value > 0) {
-                switch (func.ptr_size) {
+                switch (cg.ptr_size) {
                     .wasm32 => {
-                        try func.addImm32(offset.value);
-                        try func.addTag(.i32_add);
+                        try cg.addImm32(offset.value);
+                        try cg.addTag(.i32_add);
                     },
                     .wasm64 => {
-                        try func.addImm64(offset.value);
-                        try func.addTag(.i64_add);
+                        try cg.addImm64(offset.value);
+                        try cg.addTag(.i64_add);
                     },
                 }
             }
         },
-        else => try func.emitWValue(value),
+        else => try cg.emitWValue(value),
     }
 }
 
 /// Creates a local for the initial stack value
 /// Asserts `initial_stack_value` is `.none`
-fn initializeStack(func: *CodeGen) !void {
-    assert(func.initial_stack_value == .none);
+fn initializeStack(cg: *CodeGen) !void {
+    assert(cg.initial_stack_value == .none);
     // Reserve a local to store the current stack pointer
     // We can later use this local to set the stack pointer back to the value
     // we have stored here.
-    func.initial_stack_value = try func.ensureAllocLocal(Type.usize);
+    cg.initial_stack_value = try cg.ensureAllocLocal(Type.usize);
     // Also reserve a local to store the bottom stack value
-    func.bottom_stack_value = try func.ensureAllocLocal(Type.usize);
+    cg.bottom_stack_value = try cg.ensureAllocLocal(Type.usize);
 }
 
 /// Reads the stack pointer from `initial_stack_value` and writes it
 /// to the global stack pointer variable
-fn restoreStackPointer(func: *CodeGen) !void {
+fn restoreStackPointer(cg: *CodeGen) !void {
     // only restore the pointer if it was initialized
-    if (func.initial_stack_value == .none) return;
+    if (cg.initial_stack_value == .none) return;
     // Get the original stack pointer's value
-    try func.emitWValue(func.initial_stack_value);
+    try cg.emitWValue(cg.initial_stack_value);
 
-    try func.addTag(.global_set_sp);
+    try cg.addTag(.global_set_sp);
 }
 
 /// From a given type, will create space on the virtual stack to store the value of such type.
@@ -1561,24 +1559,24 @@ fn restoreStackPointer(func: *CodeGen) !void {
 /// moveStack unless a local was already created to store the pointer.
 ///
 /// Asserts the type has codegen bits.
-fn allocStack(func: *CodeGen, ty: Type) !WValue {
-    const zcu = func.pt.zcu;
+fn allocStack(cg: *CodeGen, ty: Type) !WValue {
+    const zcu = cg.pt.zcu;
     assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
-    if (func.initial_stack_value == .none) {
-        try func.initializeStack();
+    if (cg.initial_stack_value == .none) {
+        try cg.initializeStack();
     }
 
     const abi_size = std.math.cast(u32, ty.abiSize(zcu)) orelse {
-        return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
-            ty.fmt(func.pt), ty.abiSize(zcu),
+        return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+            ty.fmt(cg.pt), ty.abiSize(zcu),
         });
     };
     const abi_align = ty.abiAlignment(zcu);
 
-    func.stack_alignment = func.stack_alignment.max(abi_align);
+    cg.stack_alignment = cg.stack_alignment.max(abi_align);
 
-    const offset: u32 = @intCast(abi_align.forward(func.stack_size));
-    defer func.stack_size = offset + abi_size;
+    const offset: u32 = @intCast(abi_align.forward(cg.stack_size));
+    defer cg.stack_size = offset + abi_size;
 
     return .{ .stack_offset = .{ .value = offset, .references = 1 } };
 }
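
`forward` rounds the current stack size up to the value's alignment, so the
slot occupies `[offset, offset + abi_size)`. Worked numbers, using
`std.mem.alignForward` as a stand-in for `Alignment.forward` (illustrative):

    const std = @import("std");

    test "stack slot placement (illustrative numbers)" {
        const stack_size: u32 = 5; // bytes already in use in the frame
        const abi_align: u32 = 4;
        const abi_size: u32 = 8;
        const offset = std.mem.alignForward(u32, stack_size, abi_align);
        try std.testing.expectEqual(@as(u32, 8), offset); // 5 rounded up to a 4-byte boundary
        try std.testing.expectEqual(@as(u32, 16), offset + abi_size); // new stack_size
    }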
@@ -1587,30 +1585,30 @@ fn allocStack(func: *CodeGen, ty: Type) !WValue {
 /// the value of its type will live.
 /// This differs from allocStack in that it uses the pointer's alignment,
 /// if set, to ensure the stack alignment is correct.
-fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
-    const pt = func.pt;
+fn allocStackPtr(cg: *CodeGen, inst: Air.Inst.Index) !WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ptr_ty = func.typeOfIndex(inst);
+    const ptr_ty = cg.typeOfIndex(inst);
     const pointee_ty = ptr_ty.childType(zcu);
 
-    if (func.initial_stack_value == .none) {
-        try func.initializeStack();
+    if (cg.initial_stack_value == .none) {
+        try cg.initializeStack();
     }
 
     if (!pointee_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-        return func.allocStack(Type.usize); // create a value containing just the stack pointer.
+        return cg.allocStack(Type.usize); // create a value containing just the stack pointer.
     }
 
     const abi_alignment = ptr_ty.ptrAlignment(zcu);
     const abi_size = std.math.cast(u32, pointee_ty.abiSize(zcu)) orelse {
-        return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+        return cg.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
             pointee_ty.fmt(pt), pointee_ty.abiSize(zcu),
         });
     };
-    func.stack_alignment = func.stack_alignment.max(abi_alignment);
+    cg.stack_alignment = cg.stack_alignment.max(abi_alignment);
 
-    const offset: u32 = @intCast(abi_alignment.forward(func.stack_size));
-    defer func.stack_size = offset + abi_size;
+    const offset: u32 = @intCast(abi_alignment.forward(cg.stack_size));
+    defer cg.stack_size = offset + abi_size;
 
     return .{ .stack_offset = .{ .value = offset, .references = 1 } };
 }
@@ -1624,14 +1622,14 @@ fn toWasmBits(bits: u16) ?u16 {
 
 /// Performs a copy of bytes for a given type, copying all bytes
 /// from `src` to `dst`.
-fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
+fn memcpy(cg: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
     // When bulk_memory is enabled, we lower it to wasm's memcpy instruction.
     // If not, we lower it ourselves manually
-    if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) {
-        try func.lowerToStack(dst);
-        try func.lowerToStack(src);
-        try func.emitWValue(len);
-        try func.addExtended(.memory_copy);
+    if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory)) {
+        try cg.lowerToStack(dst);
+        try cg.lowerToStack(src);
+        try cg.emitWValue(len);
+        try cg.addExtended(.memory_copy);
         return;
     }
 
@@ -1652,17 +1650,17 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
             const rhs_base = src.offset();
             while (offset < length) : (offset += 1) {
                 // get dst's address to store the result
-                try func.emitWValue(dst);
+                try cg.emitWValue(dst);
                 // load byte from src's address
-                try func.emitWValue(src);
-                switch (func.ptr_size) {
+                try cg.emitWValue(src);
+                switch (cg.ptr_size) {
                     .wasm32 => {
-                        try func.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
-                        try func.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
+                        try cg.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
+                        try cg.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
                     },
                     .wasm64 => {
-                        try func.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
-                        try func.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
+                        try cg.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
+                        try cg.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
                     },
                 }
             }
@@ -1673,79 +1671,79 @@ fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
 
     // allocate a local for the offset, and set it to 0.
     // This ensures that inside loops we correctly reset the counter.
-    var offset = try func.allocLocal(Type.usize); // local for counter
-    defer offset.free(func);
-    switch (func.ptr_size) {
-        .wasm32 => try func.addImm32(0),
-        .wasm64 => try func.addImm64(0),
+    var offset = try cg.allocLocal(Type.usize); // local for counter
+    defer offset.free(cg);
+    switch (cg.ptr_size) {
+        .wasm32 => try cg.addImm32(0),
+        .wasm64 => try cg.addImm64(0),
     }
-    try func.addLabel(.local_set, offset.local.value);
+    try cg.addLabel(.local_set, offset.local.value);
 
     // outer block to jump to when loop is done
-    try func.startBlock(.block, std.wasm.block_empty);
-    try func.startBlock(.loop, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.loop, std.wasm.block_empty);
 
     // loop condition (offset == length -> break)
     {
-        try func.emitWValue(offset);
-        try func.emitWValue(len);
-        switch (func.ptr_size) {
-            .wasm32 => try func.addTag(.i32_eq),
-            .wasm64 => try func.addTag(.i64_eq),
+        try cg.emitWValue(offset);
+        try cg.emitWValue(len);
+        switch (cg.ptr_size) {
+            .wasm32 => try cg.addTag(.i32_eq),
+            .wasm64 => try cg.addTag(.i64_eq),
         }
-        try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+        try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
     }
 
     // get dst ptr
     {
-        try func.emitWValue(dst);
-        try func.emitWValue(offset);
-        switch (func.ptr_size) {
-            .wasm32 => try func.addTag(.i32_add),
-            .wasm64 => try func.addTag(.i64_add),
+        try cg.emitWValue(dst);
+        try cg.emitWValue(offset);
+        switch (cg.ptr_size) {
+            .wasm32 => try cg.addTag(.i32_add),
+            .wasm64 => try cg.addTag(.i64_add),
         }
     }
 
     // get src value and also store in dst
     {
-        try func.emitWValue(src);
-        try func.emitWValue(offset);
-        switch (func.ptr_size) {
+        try cg.emitWValue(src);
+        try cg.emitWValue(offset);
+        switch (cg.ptr_size) {
             .wasm32 => {
-                try func.addTag(.i32_add);
-                try func.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
-                try func.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
+                try cg.addTag(.i32_add);
+                try cg.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
+                try cg.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
             },
             .wasm64 => {
-                try func.addTag(.i64_add);
-                try func.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
-                try func.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
+                try cg.addTag(.i64_add);
+                try cg.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
+                try cg.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
             },
         }
     }
 
     // increment loop counter
     {
-        try func.emitWValue(offset);
-        switch (func.ptr_size) {
+        try cg.emitWValue(offset);
+        switch (cg.ptr_size) {
             .wasm32 => {
-                try func.addImm32(1);
-                try func.addTag(.i32_add);
+                try cg.addImm32(1);
+                try cg.addTag(.i32_add);
             },
             .wasm64 => {
-                try func.addImm64(1);
-                try func.addTag(.i64_add);
+                try cg.addImm64(1);
+                try cg.addTag(.i64_add);
             },
         }
-        try func.addLabel(.local_set, offset.local.value);
-        try func.addLabel(.br, 0); // jump to start of loop
+        try cg.addLabel(.local_set, offset.local.value);
+        try cg.addLabel(.br, 0); // jump to start of loop
     }
-    try func.endBlock(); // close off loop block
-    try func.endBlock(); // close off outer block
+    try cg.endBlock(); // close off loop block
+    try cg.endBlock(); // close off outer block
 }
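
For reference, the runtime-length fallback emitted above is the wasm
equivalent of this byte-wise loop (a hypothetical Zig rendering, not code from
the compiler):

    fn memcpyFallback(dst: [*]u8, src: [*]const u8, len: u32) void {
        var offset: u32 = 0; // the `local_set` counter above
        while (offset != len) : (offset += 1) { // `br_if 1` breaks out, `br 0` loops
            dst[offset] = src[offset]; // i32_load8_u followed by i32_store8
        }
    }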
 
-fn ptrSize(func: *const CodeGen) u16 {
-    return @divExact(func.target.ptrBitWidth(), 8);
+fn ptrSize(cg: *const CodeGen) u16 {
+    return @divExact(cg.target.ptrBitWidth(), 8);
 }
 
 /// For a given `Type`, will return true when the type will be passed
@@ -1837,214 +1835,214 @@ fn determineSimdStoreStrategy(ty: Type, zcu: *Zcu, target: *const std.Target) Si
 /// This can be used to get a pointer to a struct field, error payload, etc.
 /// By providing `modify` as action, it will modify the given `ptr_value` instead of making a new
 /// local value to store the pointer. This allows for local re-use and improves binary size.
-fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
+fn buildPointerOffset(cg: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
     // do not perform arithmetic when offset is 0.
     if (offset == 0 and ptr_value.offset() == 0 and action == .modify) return ptr_value;
     const result_ptr: WValue = switch (action) {
-        .new => try func.ensureAllocLocal(Type.usize),
+        .new => try cg.ensureAllocLocal(Type.usize),
         .modify => ptr_value,
     };
-    try func.emitWValue(ptr_value);
+    try cg.emitWValue(ptr_value);
     if (offset + ptr_value.offset() > 0) {
-        switch (func.ptr_size) {
+        switch (cg.ptr_size) {
             .wasm32 => {
-                try func.addImm32(@intCast(offset + ptr_value.offset()));
-                try func.addTag(.i32_add);
+                try cg.addImm32(@intCast(offset + ptr_value.offset()));
+                try cg.addTag(.i32_add);
             },
             .wasm64 => {
-                try func.addImm64(offset + ptr_value.offset());
-                try func.addTag(.i64_add);
+                try cg.addImm64(offset + ptr_value.offset());
+                try cg.addTag(.i64_add);
             },
         }
     }
-    try func.addLabel(.local_set, result_ptr.local.value);
+    try cg.addLabel(.local_set, result_ptr.local.value);
     return result_ptr;
 }
 
-fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const air_tags = func.air.instructions.items(.tag);
+fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const air_tags = cg.air.instructions.items(.tag);
     return switch (air_tags[@intFromEnum(inst)]) {
         .inferred_alloc, .inferred_alloc_comptime => unreachable,
 
-        .add => func.airBinOp(inst, .add),
-        .add_sat => func.airSatBinOp(inst, .add),
-        .add_wrap => func.airWrapBinOp(inst, .add),
-        .sub => func.airBinOp(inst, .sub),
-        .sub_sat => func.airSatBinOp(inst, .sub),
-        .sub_wrap => func.airWrapBinOp(inst, .sub),
-        .mul => func.airBinOp(inst, .mul),
-        .mul_sat => func.airSatMul(inst),
-        .mul_wrap => func.airWrapBinOp(inst, .mul),
-        .div_float, .div_exact => func.airDiv(inst),
-        .div_trunc => func.airDivTrunc(inst),
-        .div_floor => func.airDivFloor(inst),
-        .bit_and => func.airBinOp(inst, .@"and"),
-        .bit_or => func.airBinOp(inst, .@"or"),
-        .bool_and => func.airBinOp(inst, .@"and"),
-        .bool_or => func.airBinOp(inst, .@"or"),
-        .rem => func.airRem(inst),
-        .mod => func.airMod(inst),
-        .shl => func.airWrapBinOp(inst, .shl),
-        .shl_exact => func.airBinOp(inst, .shl),
-        .shl_sat => func.airShlSat(inst),
-        .shr, .shr_exact => func.airBinOp(inst, .shr),
-        .xor => func.airBinOp(inst, .xor),
-        .max => func.airMaxMin(inst, .max),
-        .min => func.airMaxMin(inst, .min),
-        .mul_add => func.airMulAdd(inst),
-
-        .sqrt => func.airUnaryFloatOp(inst, .sqrt),
-        .sin => func.airUnaryFloatOp(inst, .sin),
-        .cos => func.airUnaryFloatOp(inst, .cos),
-        .tan => func.airUnaryFloatOp(inst, .tan),
-        .exp => func.airUnaryFloatOp(inst, .exp),
-        .exp2 => func.airUnaryFloatOp(inst, .exp2),
-        .log => func.airUnaryFloatOp(inst, .log),
-        .log2 => func.airUnaryFloatOp(inst, .log2),
-        .log10 => func.airUnaryFloatOp(inst, .log10),
-        .floor => func.airUnaryFloatOp(inst, .floor),
-        .ceil => func.airUnaryFloatOp(inst, .ceil),
-        .round => func.airUnaryFloatOp(inst, .round),
-        .trunc_float => func.airUnaryFloatOp(inst, .trunc),
-        .neg => func.airUnaryFloatOp(inst, .neg),
-
-        .abs => func.airAbs(inst),
-
-        .add_with_overflow => func.airAddSubWithOverflow(inst, .add),
-        .sub_with_overflow => func.airAddSubWithOverflow(inst, .sub),
-        .shl_with_overflow => func.airShlWithOverflow(inst),
-        .mul_with_overflow => func.airMulWithOverflow(inst),
-
-        .clz => func.airClz(inst),
-        .ctz => func.airCtz(inst),
-
-        .cmp_eq => func.airCmp(inst, .eq),
-        .cmp_gte => func.airCmp(inst, .gte),
-        .cmp_gt => func.airCmp(inst, .gt),
-        .cmp_lte => func.airCmp(inst, .lte),
-        .cmp_lt => func.airCmp(inst, .lt),
-        .cmp_neq => func.airCmp(inst, .neq),
-
-        .cmp_vector => func.airCmpVector(inst),
-        .cmp_lt_errors_len => func.airCmpLtErrorsLen(inst),
-
-        .array_elem_val => func.airArrayElemVal(inst),
-        .array_to_slice => func.airArrayToSlice(inst),
-        .alloc => func.airAlloc(inst),
-        .arg => func.airArg(inst),
-        .bitcast => func.airBitcast(inst),
-        .block => func.airBlock(inst),
-        .trap => func.airTrap(inst),
-        .breakpoint => func.airBreakpoint(inst),
-        .br => func.airBr(inst),
-        .repeat => func.airRepeat(inst),
-        .switch_dispatch => return func.fail("TODO implement `switch_dispatch`", .{}),
-        .int_from_bool => func.airIntFromBool(inst),
-        .cond_br => func.airCondBr(inst),
-        .intcast => func.airIntcast(inst),
-        .fptrunc => func.airFptrunc(inst),
-        .fpext => func.airFpext(inst),
-        .int_from_float => func.airIntFromFloat(inst),
-        .float_from_int => func.airFloatFromInt(inst),
-        .get_union_tag => func.airGetUnionTag(inst),
-
-        .@"try" => func.airTry(inst),
-        .try_cold => func.airTry(inst),
-        .try_ptr => func.airTryPtr(inst),
-        .try_ptr_cold => func.airTryPtr(inst),
-
-        .dbg_stmt => func.airDbgStmt(inst),
-        .dbg_empty_stmt => try func.finishAir(inst, .none, &.{}),
-        .dbg_inline_block => func.airDbgInlineBlock(inst),
-        .dbg_var_ptr => func.airDbgVar(inst, .local_var, true),
-        .dbg_var_val => func.airDbgVar(inst, .local_var, false),
-        .dbg_arg_inline => func.airDbgVar(inst, .local_arg, false),
-
-        .call => func.airCall(inst, .auto),
-        .call_always_tail => func.airCall(inst, .always_tail),
-        .call_never_tail => func.airCall(inst, .never_tail),
-        .call_never_inline => func.airCall(inst, .never_inline),
-
-        .is_err => func.airIsErr(inst, .i32_ne),
-        .is_non_err => func.airIsErr(inst, .i32_eq),
-
-        .is_null => func.airIsNull(inst, .i32_eq, .value),
-        .is_non_null => func.airIsNull(inst, .i32_ne, .value),
-        .is_null_ptr => func.airIsNull(inst, .i32_eq, .ptr),
-        .is_non_null_ptr => func.airIsNull(inst, .i32_ne, .ptr),
-
-        .load => func.airLoad(inst),
-        .loop => func.airLoop(inst),
-        .memset => func.airMemset(inst, false),
-        .memset_safe => func.airMemset(inst, true),
-        .not => func.airNot(inst),
-        .optional_payload => func.airOptionalPayload(inst),
-        .optional_payload_ptr => func.airOptionalPayloadPtr(inst),
-        .optional_payload_ptr_set => func.airOptionalPayloadPtrSet(inst),
-        .ptr_add => func.airPtrBinOp(inst, .add),
-        .ptr_sub => func.airPtrBinOp(inst, .sub),
-        .ptr_elem_ptr => func.airPtrElemPtr(inst),
-        .ptr_elem_val => func.airPtrElemVal(inst),
-        .int_from_ptr => func.airIntFromPtr(inst),
-        .ret => func.airRet(inst),
-        .ret_safe => func.airRet(inst), // TODO
-        .ret_ptr => func.airRetPtr(inst),
-        .ret_load => func.airRetLoad(inst),
-        .splat => func.airSplat(inst),
-        .select => func.airSelect(inst),
-        .shuffle => func.airShuffle(inst),
-        .reduce => func.airReduce(inst),
-        .aggregate_init => func.airAggregateInit(inst),
-        .union_init => func.airUnionInit(inst),
-        .prefetch => func.airPrefetch(inst),
-        .popcount => func.airPopcount(inst),
-        .byte_swap => func.airByteSwap(inst),
-        .bit_reverse => func.airBitReverse(inst),
-
-        .slice => func.airSlice(inst),
-        .slice_len => func.airSliceLen(inst),
-        .slice_elem_val => func.airSliceElemVal(inst),
-        .slice_elem_ptr => func.airSliceElemPtr(inst),
-        .slice_ptr => func.airSlicePtr(inst),
-        .ptr_slice_len_ptr => func.airPtrSliceFieldPtr(inst, func.ptrSize()),
-        .ptr_slice_ptr_ptr => func.airPtrSliceFieldPtr(inst, 0),
-        .store => func.airStore(inst, false),
-        .store_safe => func.airStore(inst, true),
-
-        .set_union_tag => func.airSetUnionTag(inst),
-        .struct_field_ptr => func.airStructFieldPtr(inst),
-        .struct_field_ptr_index_0 => func.airStructFieldPtrIndex(inst, 0),
-        .struct_field_ptr_index_1 => func.airStructFieldPtrIndex(inst, 1),
-        .struct_field_ptr_index_2 => func.airStructFieldPtrIndex(inst, 2),
-        .struct_field_ptr_index_3 => func.airStructFieldPtrIndex(inst, 3),
-        .struct_field_val => func.airStructFieldVal(inst),
-        .field_parent_ptr => func.airFieldParentPtr(inst),
-
-        .switch_br => func.airSwitchBr(inst),
-        .loop_switch_br => return func.fail("TODO implement `loop_switch_br`", .{}),
-        .trunc => func.airTrunc(inst),
-        .unreach => func.airUnreachable(inst),
-
-        .wrap_optional => func.airWrapOptional(inst),
-        .unwrap_errunion_payload => func.airUnwrapErrUnionPayload(inst, false),
-        .unwrap_errunion_payload_ptr => func.airUnwrapErrUnionPayload(inst, true),
-        .unwrap_errunion_err => func.airUnwrapErrUnionError(inst, false),
-        .unwrap_errunion_err_ptr => func.airUnwrapErrUnionError(inst, true),
-        .wrap_errunion_payload => func.airWrapErrUnionPayload(inst),
-        .wrap_errunion_err => func.airWrapErrUnionErr(inst),
-        .errunion_payload_ptr_set => func.airErrUnionPayloadPtrSet(inst),
-        .error_name => func.airErrorName(inst),
-
-        .wasm_memory_size => func.airWasmMemorySize(inst),
-        .wasm_memory_grow => func.airWasmMemoryGrow(inst),
-
-        .memcpy => func.airMemcpy(inst),
-
-        .ret_addr => func.airRetAddr(inst),
-        .tag_name => func.airTagName(inst),
-
-        .error_set_has_value => func.airErrorSetHasValue(inst),
-        .frame_addr => func.airFrameAddress(inst),
+        .add => cg.airBinOp(inst, .add),
+        .add_sat => cg.airSatBinOp(inst, .add),
+        .add_wrap => cg.airWrapBinOp(inst, .add),
+        .sub => cg.airBinOp(inst, .sub),
+        .sub_sat => cg.airSatBinOp(inst, .sub),
+        .sub_wrap => cg.airWrapBinOp(inst, .sub),
+        .mul => cg.airBinOp(inst, .mul),
+        .mul_sat => cg.airSatMul(inst),
+        .mul_wrap => cg.airWrapBinOp(inst, .mul),
+        .div_float, .div_exact => cg.airDiv(inst),
+        .div_trunc => cg.airDivTrunc(inst),
+        .div_floor => cg.airDivFloor(inst),
+        .bit_and => cg.airBinOp(inst, .@"and"),
+        .bit_or => cg.airBinOp(inst, .@"or"),
+        .bool_and => cg.airBinOp(inst, .@"and"),
+        .bool_or => cg.airBinOp(inst, .@"or"),
+        .rem => cg.airRem(inst),
+        .mod => cg.airMod(inst),
+        .shl => cg.airWrapBinOp(inst, .shl),
+        .shl_exact => cg.airBinOp(inst, .shl),
+        .shl_sat => cg.airShlSat(inst),
+        .shr, .shr_exact => cg.airBinOp(inst, .shr),
+        .xor => cg.airBinOp(inst, .xor),
+        .max => cg.airMaxMin(inst, .max),
+        .min => cg.airMaxMin(inst, .min),
+        .mul_add => cg.airMulAdd(inst),
+
+        .sqrt => cg.airUnaryFloatOp(inst, .sqrt),
+        .sin => cg.airUnaryFloatOp(inst, .sin),
+        .cos => cg.airUnaryFloatOp(inst, .cos),
+        .tan => cg.airUnaryFloatOp(inst, .tan),
+        .exp => cg.airUnaryFloatOp(inst, .exp),
+        .exp2 => cg.airUnaryFloatOp(inst, .exp2),
+        .log => cg.airUnaryFloatOp(inst, .log),
+        .log2 => cg.airUnaryFloatOp(inst, .log2),
+        .log10 => cg.airUnaryFloatOp(inst, .log10),
+        .floor => cg.airUnaryFloatOp(inst, .floor),
+        .ceil => cg.airUnaryFloatOp(inst, .ceil),
+        .round => cg.airUnaryFloatOp(inst, .round),
+        .trunc_float => cg.airUnaryFloatOp(inst, .trunc),
+        .neg => cg.airUnaryFloatOp(inst, .neg),
+
+        .abs => cg.airAbs(inst),
+
+        .add_with_overflow => cg.airAddSubWithOverflow(inst, .add),
+        .sub_with_overflow => cg.airAddSubWithOverflow(inst, .sub),
+        .shl_with_overflow => cg.airShlWithOverflow(inst),
+        .mul_with_overflow => cg.airMulWithOverflow(inst),
+
+        .clz => cg.airClz(inst),
+        .ctz => cg.airCtz(inst),
+
+        .cmp_eq => cg.airCmp(inst, .eq),
+        .cmp_gte => cg.airCmp(inst, .gte),
+        .cmp_gt => cg.airCmp(inst, .gt),
+        .cmp_lte => cg.airCmp(inst, .lte),
+        .cmp_lt => cg.airCmp(inst, .lt),
+        .cmp_neq => cg.airCmp(inst, .neq),
+
+        .cmp_vector => cg.airCmpVector(inst),
+        .cmp_lt_errors_len => cg.airCmpLtErrorsLen(inst),
+
+        .array_elem_val => cg.airArrayElemVal(inst),
+        .array_to_slice => cg.airArrayToSlice(inst),
+        .alloc => cg.airAlloc(inst),
+        .arg => cg.airArg(inst),
+        .bitcast => cg.airBitcast(inst),
+        .block => cg.airBlock(inst),
+        .trap => cg.airTrap(inst),
+        .breakpoint => cg.airBreakpoint(inst),
+        .br => cg.airBr(inst),
+        .repeat => cg.airRepeat(inst),
+        .switch_dispatch => return cg.fail("TODO implement `switch_dispatch`", .{}),
+        .int_from_bool => cg.airIntFromBool(inst),
+        .cond_br => cg.airCondBr(inst),
+        .intcast => cg.airIntcast(inst),
+        .fptrunc => cg.airFptrunc(inst),
+        .fpext => cg.airFpext(inst),
+        .int_from_float => cg.airIntFromFloat(inst),
+        .float_from_int => cg.airFloatFromInt(inst),
+        .get_union_tag => cg.airGetUnionTag(inst),
+
+        .@"try" => cg.airTry(inst),
+        .try_cold => cg.airTry(inst),
+        .try_ptr => cg.airTryPtr(inst),
+        .try_ptr_cold => cg.airTryPtr(inst),
+
+        .dbg_stmt => cg.airDbgStmt(inst),
+        .dbg_empty_stmt => try cg.finishAir(inst, .none, &.{}),
+        .dbg_inline_block => cg.airDbgInlineBlock(inst),
+        .dbg_var_ptr => cg.airDbgVar(inst, .local_var, true),
+        .dbg_var_val => cg.airDbgVar(inst, .local_var, false),
+        .dbg_arg_inline => cg.airDbgVar(inst, .local_arg, false),
+
+        .call => cg.airCall(inst, .auto),
+        .call_always_tail => cg.airCall(inst, .always_tail),
+        .call_never_tail => cg.airCall(inst, .never_tail),
+        .call_never_inline => cg.airCall(inst, .never_inline),
+
+        .is_err => cg.airIsErr(inst, .i32_ne),
+        .is_non_err => cg.airIsErr(inst, .i32_eq),
+
+        .is_null => cg.airIsNull(inst, .i32_eq, .value),
+        .is_non_null => cg.airIsNull(inst, .i32_ne, .value),
+        .is_null_ptr => cg.airIsNull(inst, .i32_eq, .ptr),
+        .is_non_null_ptr => cg.airIsNull(inst, .i32_ne, .ptr),
+
+        .load => cg.airLoad(inst),
+        .loop => cg.airLoop(inst),
+        .memset => cg.airMemset(inst, false),
+        .memset_safe => cg.airMemset(inst, true),
+        .not => cg.airNot(inst),
+        .optional_payload => cg.airOptionalPayload(inst),
+        .optional_payload_ptr => cg.airOptionalPayloadPtr(inst),
+        .optional_payload_ptr_set => cg.airOptionalPayloadPtrSet(inst),
+        .ptr_add => cg.airPtrBinOp(inst, .add),
+        .ptr_sub => cg.airPtrBinOp(inst, .sub),
+        .ptr_elem_ptr => cg.airPtrElemPtr(inst),
+        .ptr_elem_val => cg.airPtrElemVal(inst),
+        .int_from_ptr => cg.airIntFromPtr(inst),
+        .ret => cg.airRet(inst),
+        .ret_safe => cg.airRet(inst), // TODO
+        .ret_ptr => cg.airRetPtr(inst),
+        .ret_load => cg.airRetLoad(inst),
+        .splat => cg.airSplat(inst),
+        .select => cg.airSelect(inst),
+        .shuffle => cg.airShuffle(inst),
+        .reduce => cg.airReduce(inst),
+        .aggregate_init => cg.airAggregateInit(inst),
+        .union_init => cg.airUnionInit(inst),
+        .prefetch => cg.airPrefetch(inst),
+        .popcount => cg.airPopcount(inst),
+        .byte_swap => cg.airByteSwap(inst),
+        .bit_reverse => cg.airBitReverse(inst),
+
+        .slice => cg.airSlice(inst),
+        .slice_len => cg.airSliceLen(inst),
+        .slice_elem_val => cg.airSliceElemVal(inst),
+        .slice_elem_ptr => cg.airSliceElemPtr(inst),
+        .slice_ptr => cg.airSlicePtr(inst),
+        .ptr_slice_len_ptr => cg.airPtrSliceFieldPtr(inst, cg.ptrSize()),
+        .ptr_slice_ptr_ptr => cg.airPtrSliceFieldPtr(inst, 0),
+        .store => cg.airStore(inst, false),
+        .store_safe => cg.airStore(inst, true),
+
+        .set_union_tag => cg.airSetUnionTag(inst),
+        .struct_field_ptr => cg.airStructFieldPtr(inst),
+        .struct_field_ptr_index_0 => cg.airStructFieldPtrIndex(inst, 0),
+        .struct_field_ptr_index_1 => cg.airStructFieldPtrIndex(inst, 1),
+        .struct_field_ptr_index_2 => cg.airStructFieldPtrIndex(inst, 2),
+        .struct_field_ptr_index_3 => cg.airStructFieldPtrIndex(inst, 3),
+        .struct_field_val => cg.airStructFieldVal(inst),
+        .field_parent_ptr => cg.airFieldParentPtr(inst),
+
+        .switch_br => cg.airSwitchBr(inst),
+        .loop_switch_br => return cg.fail("TODO implement `loop_switch_br`", .{}),
+        .trunc => cg.airTrunc(inst),
+        .unreach => cg.airUnreachable(inst),
+
+        .wrap_optional => cg.airWrapOptional(inst),
+        .unwrap_errunion_payload => cg.airUnwrapErrUnionPayload(inst, false),
+        .unwrap_errunion_payload_ptr => cg.airUnwrapErrUnionPayload(inst, true),
+        .unwrap_errunion_err => cg.airUnwrapErrUnionError(inst, false),
+        .unwrap_errunion_err_ptr => cg.airUnwrapErrUnionError(inst, true),
+        .wrap_errunion_payload => cg.airWrapErrUnionPayload(inst),
+        .wrap_errunion_err => cg.airWrapErrUnionErr(inst),
+        .errunion_payload_ptr_set => cg.airErrUnionPayloadPtrSet(inst),
+        .error_name => cg.airErrorName(inst),
+
+        .wasm_memory_size => cg.airWasmMemorySize(inst),
+        .wasm_memory_grow => cg.airWasmMemoryGrow(inst),
+
+        .memcpy => cg.airMemcpy(inst),
+
+        .ret_addr => cg.airRetAddr(inst),
+        .tag_name => cg.airTagName(inst),
+
+        .error_set_has_value => cg.airErrorSetHasValue(inst),
+        .frame_addr => cg.airFrameAddress(inst),
 
         .assembly,
         .is_err_ptr,
@@ -2060,18 +2058,18 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         .c_va_copy,
         .c_va_end,
         .c_va_start,
-        => |tag| return func.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
+        => |tag| return cg.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
 
-        .atomic_load => func.airAtomicLoad(inst),
+        .atomic_load => cg.airAtomicLoad(inst),
         .atomic_store_unordered,
         .atomic_store_monotonic,
         .atomic_store_release,
         .atomic_store_seq_cst,
         // in WebAssembly, all atomic instructions are sequentially ordered.
-        => func.airAtomicStore(inst),
-        .atomic_rmw => func.airAtomicRmw(inst),
-        .cmpxchg_weak => func.airCmpxchg(inst),
-        .cmpxchg_strong => func.airCmpxchg(inst),
+        => cg.airAtomicStore(inst),
+        .atomic_rmw => cg.airAtomicRmw(inst),
+        .cmpxchg_weak => cg.airCmpxchg(inst),
+        .cmpxchg_strong => cg.airCmpxchg(inst),
 
         .add_optimized,
         .sub_optimized,
@@ -2092,12 +2090,12 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         .cmp_vector_optimized,
         .reduce_optimized,
         .int_from_float_optimized,
-        => return func.fail("TODO implement optimized float mode", .{}),
+        => return cg.fail("TODO implement optimized float mode", .{}),
 
         .add_safe,
         .sub_safe,
         .mul_safe,
-        => return func.fail("TODO implement safety_checked_instructions", .{}),
+        => return cg.fail("TODO implement safety_checked_instructions", .{}),
 
         .work_item_id,
         .work_group_size,
@@ -2106,124 +2104,124 @@ fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     };
 }
 
-fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
 
     for (body) |inst| {
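         // Skip instructions whose result is unused, unless they must be
         // lowered anyway (e.g. for side effects).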
-        if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) {
+        if (cg.liveness.isUnused(inst) and !cg.air.mustLower(inst, ip)) {
             continue;
         }
-        const old_bookkeeping_value = func.air_bookkeeping;
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, Liveness.bpi);
-        try func.genInst(inst);
+        const old_bookkeeping_value = cg.air_bookkeeping;
+        try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, Liveness.bpi);
+        try cg.genInst(inst);
 
-        if (std.debug.runtime_safety and func.air_bookkeeping < old_bookkeeping_value + 1) {
+        if (std.debug.runtime_safety and cg.air_bookkeeping < old_bookkeeping_value + 1) {
             std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{}')", .{
                 inst,
-                func.air.instructions.items(.tag)[@intFromEnum(inst)],
+                cg.air.instructions.items(.tag)[@intFromEnum(inst)],
             });
         }
     }
 }
 
-fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airRet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
     const ret_ty = Type.fromInterned(fn_info.return_type);
 
     // The result must be stored on the stack, and we return a pointer
     // to the stack instead.
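     // (`return_value` is the caller-provided sret pointer; see `airRetPtr`.)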
-    if (func.return_value != .none) {
-        try func.store(func.return_value, operand, ret_ty, 0);
+    if (cg.return_value != .none) {
+        try cg.store(cg.return_value, operand, ret_ty, 0);
     } else if (fn_info.cc == .wasm_watc and ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         switch (ret_ty.zigTypeTag(zcu)) {
             // Aggregate types can be lowered as a singular value
             .@"struct", .@"union" => {
                 const scalar_type = abi.scalarType(ret_ty, zcu);
-                try func.emitWValue(operand);
+                try cg.emitWValue(operand);
                 const opcode = buildOpcode(.{
                     .op = .load,
                     .width = @as(u8, @intCast(scalar_type.abiSize(zcu) * 8)),
                     .signedness = if (scalar_type.isSignedInt(zcu)) .signed else .unsigned,
-                    .valtype1 = typeToValtype(scalar_type, pt, func.target),
+                    .valtype1 = typeToValtype(scalar_type, pt, cg.target),
                 });
-                try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
+                try cg.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                     .offset = operand.offset(),
                     .alignment = @intCast(scalar_type.abiAlignment(zcu).toByteUnits().?),
                 });
             },
-            else => try func.emitWValue(operand),
+            else => try cg.emitWValue(operand),
         }
     } else {
         if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and ret_ty.isError(zcu)) {
-            try func.addImm32(0);
+            try cg.addImm32(0);
         } else {
-            try func.emitWValue(operand);
+            try cg.emitWValue(operand);
         }
     }
-    try func.restoreStackPointer();
-    try func.addTag(.@"return");
+    try cg.restoreStackPointer();
+    try cg.addTag(.@"return");
 
-    return func.finishAir(inst, .none, &.{un_op});
+    return cg.finishAir(inst, .none, &.{un_op});
 }
 
-fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airRetPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const child_type = func.typeOfIndex(inst).childType(zcu);
+    const child_type = cg.typeOfIndex(inst).childType(zcu);
 
     const result = result: {
         if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime(zcu)) {
-            break :result try func.allocStack(Type.usize); // create pointer to void
+            break :result try cg.allocStack(Type.usize); // create pointer to void
         }
 
-        const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
-        if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target)) {
-            break :result func.return_value;
+        const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
+        if (firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, cg.target)) {
+            break :result cg.return_value;
         }
 
-        break :result try func.allocStackPtr(inst);
+        break :result try cg.allocStackPtr(inst);
     };
 
-    return func.finishAir(inst, result, &.{});
+    return cg.finishAir(inst, result, &.{});
 }
 
-fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airRetLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const ret_ty = func.typeOf(un_op).childType(zcu);
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const ret_ty = cg.typeOf(un_op).childType(zcu);
 
-    const fn_info = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?;
+    const fn_info = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?;
     if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         if (ret_ty.isError(zcu)) {
-            try func.addImm32(0);
+            try cg.addImm32(0);
         }
-    } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target)) {
+    } else if (!firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, cg.target)) {
         // leave the loaded result on the stack
-        _ = try func.load(operand, ret_ty, 0);
+        _ = try cg.load(operand, ret_ty, 0);
     }
 
-    try func.restoreStackPointer();
-    try func.addTag(.@"return");
-    return func.finishAir(inst, .none, &.{un_op});
+    try cg.restoreStackPointer();
+    try cg.addTag(.@"return");
+    return cg.finishAir(inst, .none, &.{un_op});
 }
 
-fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
-    const wasm = func.wasm;
-    if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const extra = func.air.extraData(Air.Call, pl_op.payload);
-    const args: []const Air.Inst.Ref = @ptrCast(func.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = func.typeOf(pl_op.operand);
+fn airCall(cg: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) InnerError!void {
+    const wasm = cg.wasm;
+    if (modifier == .always_tail) return cg.fail("TODO implement tail calls for wasm", .{});
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const extra = cg.air.extraData(Air.Call, pl_op.payload);
+    const args: []const Air.Inst.Ref = @ptrCast(cg.air.extra[extra.end..][0..extra.data.args_len]);
+    const ty = cg.typeOf(pl_op.operand);
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
     const fn_ty = switch (ty.zigTypeTag(zcu)) {
@@ -2233,10 +2231,10 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
     };
     const ret_ty = fn_ty.fnReturnType(zcu);
     const fn_info = zcu.typeToFunc(fn_ty).?;
-    const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, func.target);
+    const first_param_sret = firstParamSRet(fn_info.cc, Type.fromInterned(fn_info.return_type), pt, cg.target);
 
     const callee: ?InternPool.Nav.Index = blk: {
-        const func_val = (try func.air.value(pl_op.operand, pt)) orelse break :blk null;
+        const func_val = (try cg.air.value(pl_op.operand, pt)) orelse break :blk null;
 
         switch (ip.indexToKey(func_val.toIntern())) {
             inline .func, .@"extern" => |x| break :blk x.owner_nav,
@@ -2246,96 +2244,96 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
             },
             else => {},
         }
-        return func.fail("unable to lower callee to a function index", .{});
+        return cg.fail("unable to lower callee to a function index", .{});
     };
 
     const sret: WValue = if (first_param_sret) blk: {
-        const sret_local = try func.allocStack(ret_ty);
-        try func.lowerToStack(sret_local);
+        const sret_local = try cg.allocStack(ret_ty);
+        try cg.lowerToStack(sret_local);
         break :blk sret_local;
     } else .none;
 
     for (args) |arg| {
-        const arg_val = try func.resolveInst(arg);
+        const arg_val = try cg.resolveInst(arg);
 
-        const arg_ty = func.typeOf(arg);
+        const arg_ty = cg.typeOf(arg);
         if (!arg_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
-        try func.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
+        try cg.lowerArg(zcu.typeToFunc(fn_ty).?.cc, arg_ty, arg_val);
     }
 
     if (callee) |nav_index| {
-        try func.addNav(.call_nav, nav_index);
+        try cg.addNav(.call_nav, nav_index);
     } else {
         // In this case we call a function pointer,
         // so load its value onto the stack.
         assert(ty.zigTypeTag(zcu) == .pointer);
-        const operand = try func.resolveInst(pl_op.operand);
-        try func.emitWValue(operand);
+        const operand = try cg.resolveInst(pl_op.operand);
+        try cg.emitWValue(operand);
 
-        const fn_type_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, func.target);
-        try func.addLabel(.call_indirect, @intFromEnum(fn_type_index));
+        const fn_type_index = try genFunctype(wasm, fn_info.cc, fn_info.param_types.get(ip), Type.fromInterned(fn_info.return_type), pt, cg.target);
+        try cg.addLabel(.call_indirect, @intFromEnum(fn_type_index));
     }
 
     const result_value = result_value: {
         if (!ret_ty.hasRuntimeBitsIgnoreComptime(zcu) and !ret_ty.isError(zcu)) {
             break :result_value .none;
         } else if (ret_ty.isNoReturn(zcu)) {
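             // A noreturn callee never yields a value; `unreachable` keeps the
             // code after the call valid.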
-            try func.addTag(.@"unreachable");
+            try cg.addTag(.@"unreachable");
             break :result_value .none;
         } else if (first_param_sret) {
             break :result_value sret;
             // TODO: Make this less fragile and optimize
         } else if (zcu.typeToFunc(fn_ty).?.cc == .wasm_watc and ret_ty.zigTypeTag(zcu) == .@"struct" or ret_ty.zigTypeTag(zcu) == .@"union") {
-            const result_local = try func.allocLocal(ret_ty);
-            try func.addLabel(.local_set, result_local.local.value);
+            const result_local = try cg.allocLocal(ret_ty);
+            try cg.addLabel(.local_set, result_local.local.value);
             const scalar_type = abi.scalarType(ret_ty, zcu);
-            const result = try func.allocStack(scalar_type);
-            try func.store(result, result_local, scalar_type, 0);
+            const result = try cg.allocStack(scalar_type);
+            try cg.store(result, result_local, scalar_type, 0);
             break :result_value result;
         } else {
-            const result_local = try func.allocLocal(ret_ty);
-            try func.addLabel(.local_set, result_local.local.value);
+            const result_local = try cg.allocLocal(ret_ty);
+            try cg.addLabel(.local_set, result_local.local.value);
             break :result_value result_local;
         }
     };
 
-    var bt = try func.iterateBigTomb(inst, 1 + args.len);
+    var bt = try cg.iterateBigTomb(inst, 1 + args.len);
     bt.feed(pl_op.operand);
     for (args) |arg| bt.feed(arg);
     return bt.finishAir(result_value);
 }
 
-fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const value = try func.allocStackPtr(inst);
-    return func.finishAir(inst, value, &.{});
+fn airAlloc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const value = try cg.allocStackPtr(inst);
+    return cg.finishAir(inst, value, &.{});
 }
 
-fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
-    const pt = func.pt;
+fn airStore(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
         // TODO if the value is undef, don't lower this instruction
     }
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
-    const ptr_ty = func.typeOf(bin_op.lhs);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
+    const ptr_ty = cg.typeOf(bin_op.lhs);
     const ptr_info = ptr_ty.ptrInfo(zcu);
     const ty = ptr_ty.childType(zcu);
 
     if (ptr_info.packed_offset.host_size == 0) {
-        try func.store(lhs, rhs, ty, 0);
+        try cg.store(lhs, rhs, ty, 0);
     } else {
         // At this point we have a non-natural alignment; we must
         // load the value, then shift+or the rhs into the result location.
         const int_elem_ty = try pt.intType(.unsigned, ptr_info.packed_offset.host_size * 8);
 
-        if (isByRef(int_elem_ty, pt, func.target)) {
-            return func.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
+        if (isByRef(int_elem_ty, pt, cg.target)) {
+            return cg.fail("TODO: airStore for pointers to bitfields with backing type larger than 64bits", .{});
         }
 
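         // Mask of `bitSize(ty)` one-bits, i.e. (1 << bits) - 1; the u65/u7
         // intermediates keep the shift in range when bits == 64.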
         var mask = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(ty.bitSize(zcu)))) - 1));
@@ -2354,115 +2352,115 @@ fn airStore(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
         else
             .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - ty.bitSize(zcu)) };
 
-        try func.emitWValue(lhs);
-        const loaded = try func.load(lhs, int_elem_ty, 0);
-        const anded = try func.binOp(loaded, mask_val, int_elem_ty, .@"and");
-        const extended_value = try func.intcast(rhs, ty, int_elem_ty);
-        const masked_value = try func.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and");
+        try cg.emitWValue(lhs);
+        const loaded = try cg.load(lhs, int_elem_ty, 0);
+        const anded = try cg.binOp(loaded, mask_val, int_elem_ty, .@"and");
+        const extended_value = try cg.intcast(rhs, ty, int_elem_ty);
+        const masked_value = try cg.binOp(extended_value, wrap_mask_val, int_elem_ty, .@"and");
         const shifted_value = if (ptr_info.packed_offset.bit_offset > 0) shifted: {
-            break :shifted try func.binOp(masked_value, shift_val, int_elem_ty, .shl);
+            break :shifted try cg.binOp(masked_value, shift_val, int_elem_ty, .shl);
         } else masked_value;
-        const result = try func.binOp(anded, shifted_value, int_elem_ty, .@"or");
+        const result = try cg.binOp(anded, shifted_value, int_elem_ty, .@"or");
         // lhs is still on the stack
-        try func.store(.stack, result, int_elem_ty, lhs.offset());
+        try cg.store(.stack, result, int_elem_ty, lhs.offset());
     }
 
-    return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
+fn store(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
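     // If rhs is already on the wasm value stack, lhs must be too; otherwise
     // the operands would end up emitted in the wrong order.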
     assert(!(lhs != .stack and rhs == .stack));
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const abi_size = ty.abiSize(zcu);
     switch (ty.zigTypeTag(zcu)) {
         .error_union => {
             const pl_ty = ty.errorUnionPayload(zcu);
             if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-                return func.store(lhs, rhs, Type.anyerror, 0);
+                return cg.store(lhs, rhs, Type.anyerror, 0);
             }
 
             const len = @as(u32, @intCast(abi_size));
-            return func.memcpy(lhs, rhs, .{ .imm32 = len });
+            return cg.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .optional => {
             if (ty.isPtrLikeOptional(zcu)) {
-                return func.store(lhs, rhs, Type.usize, 0);
+                return cg.store(lhs, rhs, Type.usize, 0);
             }
             const pl_ty = ty.optionalChild(zcu);
             if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-                return func.store(lhs, rhs, Type.u8, 0);
+                return cg.store(lhs, rhs, Type.u8, 0);
             }
             if (pl_ty.zigTypeTag(zcu) == .error_set) {
-                return func.store(lhs, rhs, Type.anyerror, 0);
+                return cg.store(lhs, rhs, Type.anyerror, 0);
             }
 
             const len = @as(u32, @intCast(abi_size));
-            return func.memcpy(lhs, rhs, .{ .imm32 = len });
+            return cg.memcpy(lhs, rhs, .{ .imm32 = len });
         },
-        .@"struct", .array, .@"union" => if (isByRef(ty, pt, func.target)) {
+        .@"struct", .array, .@"union" => if (isByRef(ty, pt, cg.target)) {
             const len = @as(u32, @intCast(abi_size));
-            return func.memcpy(lhs, rhs, .{ .imm32 = len });
+            return cg.memcpy(lhs, rhs, .{ .imm32 = len });
         },
-        .vector => switch (determineSimdStoreStrategy(ty, zcu, func.target)) {
+        .vector => switch (determineSimdStoreStrategy(ty, zcu, cg.target)) {
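             // `unrolled` copies the vector byte-by-byte via `memcpy`; `direct`
             // emits a single v128.store (presumably picked when the target supports SIMD).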
             .unrolled => {
                 const len: u32 = @intCast(abi_size);
-                return func.memcpy(lhs, rhs, .{ .imm32 = len });
+                return cg.memcpy(lhs, rhs, .{ .imm32 = len });
             },
             .direct => {
-                try func.emitWValue(lhs);
-                try func.lowerToStack(rhs);
+                try cg.emitWValue(lhs);
+                try cg.lowerToStack(rhs);
                 // TODO: Add helper functions for simd opcodes
-                const extra_index: u32 = @intCast(func.mir_extra.items.len);
+                const extra_index: u32 = @intCast(cg.mir_extra.items.len);
                 // stored as: opcode, offset, alignment (opcode::memarg)
-                try func.mir_extra.appendSlice(func.gpa, &[_]u32{
+                try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
                     @intFromEnum(std.wasm.SimdOpcode.v128_store),
                     offset + lhs.offset(),
                     @intCast(ty.abiAlignment(zcu).toByteUnits() orelse 0),
                 });
-                return func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+                return cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
             },
         },
         .pointer => {
             if (ty.isSlice(zcu)) {
                 // store pointer first
                 // lower it to the stack so we do not have to store rhs into a local first
-                try func.emitWValue(lhs);
-                const ptr_local = try func.load(rhs, Type.usize, 0);
-                try func.store(.stack, ptr_local, Type.usize, 0 + lhs.offset());
+                try cg.emitWValue(lhs);
+                const ptr_local = try cg.load(rhs, Type.usize, 0);
+                try cg.store(.stack, ptr_local, Type.usize, 0 + lhs.offset());
 
                 // retrieve length from rhs, and store that alongside lhs as well
-                try func.emitWValue(lhs);
-                const len_local = try func.load(rhs, Type.usize, func.ptrSize());
-                try func.store(.stack, len_local, Type.usize, func.ptrSize() + lhs.offset());
+                try cg.emitWValue(lhs);
+                const len_local = try cg.load(rhs, Type.usize, cg.ptrSize());
+                try cg.store(.stack, len_local, Type.usize, cg.ptrSize() + lhs.offset());
                 return;
             }
         },
         .int, .@"enum", .float => if (abi_size > 8 and abi_size <= 16) {
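             // There is no single wasm store for 128-bit values, so copy them
             // as two u64 halves.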
-            try func.emitWValue(lhs);
-            const lsb = try func.load(rhs, Type.u64, 0);
-            try func.store(.stack, lsb, Type.u64, 0 + lhs.offset());
+            try cg.emitWValue(lhs);
+            const lsb = try cg.load(rhs, Type.u64, 0);
+            try cg.store(.stack, lsb, Type.u64, 0 + lhs.offset());
 
-            try func.emitWValue(lhs);
-            const msb = try func.load(rhs, Type.u64, 8);
-            try func.store(.stack, msb, Type.u64, 8 + lhs.offset());
+            try cg.emitWValue(lhs);
+            const msb = try cg.load(rhs, Type.u64, 8);
+            try cg.store(.stack, msb, Type.u64, 8 + lhs.offset());
             return;
         } else if (abi_size > 16) {
-            try func.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
+            try cg.memcpy(lhs, rhs, .{ .imm32 = @as(u32, @intCast(ty.abiSize(zcu))) });
         },
         else => if (abi_size > 8) {
-            return func.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
+            return cg.fail("TODO: `store` for type `{}` with abisize `{d}`", .{
                 ty.fmt(pt),
                 abi_size,
             });
         },
     }
-    try func.emitWValue(lhs);
+    try cg.emitWValue(lhs);
     // In this case we're actually interested in storing the stack position
     // into lhs, so we calculate that and emit that instead
-    try func.lowerToStack(rhs);
+    try cg.lowerToStack(rhs);
 
-    const valtype = typeToValtype(ty, pt, func.target);
+    const valtype = typeToValtype(ty, pt, cg.target);
     const opcode = buildOpcode(.{
         .valtype1 = valtype,
         .width = @as(u8, @intCast(abi_size * 8)),
@@ -2470,7 +2468,7 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
     });
 
     // store rhs value at stack pointer's location in memory
-    try func.addMemArg(
+    try cg.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
         .{
             .offset = offset + lhs.offset(),
@@ -2479,26 +2477,26 @@ fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerE
     );
 }
 
-fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
     const ty = ty_op.ty.toType();
-    const ptr_ty = func.typeOf(ty_op.operand);
+    const ptr_ty = cg.typeOf(ty_op.operand);
     const ptr_info = ptr_ty.ptrInfo(zcu);
 
-    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    if (!ty.hasRuntimeBitsIgnoreComptime(zcu)) return cg.finishAir(inst, .none, &.{ty_op.operand});
 
     const result = result: {
-        if (isByRef(ty, pt, func.target)) {
-            const new_local = try func.allocStack(ty);
-            try func.store(new_local, operand, ty, 0);
+        if (isByRef(ty, pt, cg.target)) {
+            const new_local = try cg.allocStack(ty);
+            try cg.store(new_local, operand, ty, 0);
             break :result new_local;
         }
 
         if (ptr_info.packed_offset.host_size == 0) {
-            break :result try func.load(operand, ty, 0);
+            break :result try cg.load(operand, ty, 0);
         }
 
         // at this point we have a non-natural alignment, we must
@@ -2509,45 +2507,45 @@ fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         else if (ptr_info.packed_offset.host_size <= 8)
             .{ .imm64 = ptr_info.packed_offset.bit_offset }
         else
-            return func.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
+            return cg.fail("TODO: airLoad where ptr to bitfield exceeds 64 bits", .{});
 
-        const stack_loaded = try func.load(operand, int_elem_ty, 0);
-        const shifted = try func.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
-        break :result try func.trunc(shifted, ty, int_elem_ty);
+        const stack_loaded = try cg.load(operand, int_elem_ty, 0);
+        const shifted = try cg.binOp(stack_loaded, shift_val, int_elem_ty, .shr);
+        break :result try cg.trunc(shifted, ty, int_elem_ty);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Loads an operand from the linear memory section.
 /// NOTE: Leaves the value on the stack.
-fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
-    const pt = func.pt;
+fn load(cg: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     // load the local's value from memory by its stack position
-    try func.emitWValue(operand);
+    try cg.emitWValue(operand);
 
     if (ty.zigTypeTag(zcu) == .vector) {
         // TODO: Add helper functions for simd opcodes
-        const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
+        const extra_index = @as(u32, @intCast(cg.mir_extra.items.len));
         // stored as: opcode, offset, alignment (opcode::memarg)
-        try func.mir_extra.appendSlice(func.gpa, &[_]u32{
+        try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
             @intFromEnum(std.wasm.SimdOpcode.v128_load),
             offset + operand.offset(),
             @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         });
-        try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+        try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
         return .stack;
     }
 
     const abi_size: u8 = @intCast(ty.abiSize(zcu));
     const opcode = buildOpcode(.{
-        .valtype1 = typeToValtype(ty, pt, func.target),
+        .valtype1 = typeToValtype(ty, pt, cg.target),
         .width = abi_size * 8,
         .op = .load,
         .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
     });
 
-    try func.addMemArg(
+    try cg.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
         .{
             .offset = offset + operand.offset(),
@@ -2558,18 +2556,18 @@ fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValu
     return .stack;
 }
 
-fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airArg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const arg_index = func.arg_index;
-    const arg = func.args[arg_index];
-    const cc = zcu.typeToFunc(zcu.navValue(func.owner_nav).typeOf(zcu)).?.cc;
-    const arg_ty = func.typeOfIndex(inst);
+    const arg_index = cg.arg_index;
+    const arg = cg.args[arg_index];
+    const cc = zcu.typeToFunc(zcu.navValue(cg.owner_nav).typeOf(zcu)).?.cc;
+    const arg_ty = cg.typeOfIndex(inst);
     if (cc == .wasm_watc) {
         const arg_classes = abi.classifyType(arg_ty, zcu);
         for (arg_classes) |class| {
             if (class != .none) {
-                func.arg_index += 1;
+                cg.arg_index += 1;
             }
         }
 
@@ -2577,31 +2575,31 @@ fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         // we combine them into a single stack value
         if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
             if (arg_ty.zigTypeTag(zcu) != .int and arg_ty.zigTypeTag(zcu) != .float) {
-                return func.fail(
+                return cg.fail(
                     "TODO: Implement C-ABI argument for type '{}'",
                     .{arg_ty.fmt(pt)},
                 );
             }
-            const result = try func.allocStack(arg_ty);
-            try func.store(result, arg, Type.u64, 0);
-            try func.store(result, func.args[arg_index + 1], Type.u64, 8);
-            return func.finishAir(inst, result, &.{});
+            const result = try cg.allocStack(arg_ty);
+            try cg.store(result, arg, Type.u64, 0);
+            try cg.store(result, cg.args[arg_index + 1], Type.u64, 8);
+            return cg.finishAir(inst, result, &.{});
         }
     } else {
-        func.arg_index += 1;
+        cg.arg_index += 1;
     }
 
-    return func.finishAir(inst, arg, &.{});
+    return cg.finishAir(inst, arg, &.{});
 }
 
-fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const pt = func.pt;
+fn airBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
-    const lhs_ty = func.typeOf(bin_op.lhs);
-    const rhs_ty = func.typeOf(bin_op.rhs);
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
+    const lhs_ty = cg.typeOf(bin_op.lhs);
+    const rhs_ty = cg.typeOf(bin_op.rhs);
 
     // For certain operations, such as shifting, the types are different.
     // When converting this to a WebAssembly type, they *must* match to perform
@@ -2611,38 +2609,38 @@ fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const result = switch (op) {
         .shr, .shl => result: {
             const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
-                return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
+                return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
             const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
-                try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
+                try (try cg.intcast(rhs, rhs_ty, lhs_ty)).toLocal(cg, lhs_ty)
             else
                 rhs;
-            break :result try func.binOp(lhs, new_rhs, lhs_ty, op);
+            break :result try cg.binOp(lhs, new_rhs, lhs_ty, op);
         },
-        else => try func.binOp(lhs, rhs, lhs_ty, op),
+        else => try cg.binOp(lhs, rhs, lhs_ty, op),
     };
 
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Performs a binary operation on the given `WValue`s.
 /// NOTE: This leaves the value on top of the stack.
-fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const pt = func.pt;
+fn binOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     assert(!(lhs != .stack and rhs == .stack));
 
     if (ty.isAnyFloat()) {
         const float_op = FloatOp.fromOp(op);
-        return func.floatOp(float_op, ty, &.{ lhs, rhs });
+        return cg.floatOp(float_op, ty, &.{ lhs, rhs });
     }
 
-    if (isByRef(ty, pt, func.target)) {
+    if (isByRef(ty, pt, cg.target)) {
         if (ty.zigTypeTag(zcu) == .int) {
-            return func.binOpBigInt(lhs, rhs, ty, op);
+            return cg.binOpBigInt(lhs, rhs, ty, op);
         } else {
-            return func.fail(
+            return cg.fail(
                 "TODO: Implement binary operation for type: {}",
                 .{ty.fmt(pt)},
             );
@@ -2651,82 +2649,82 @@ fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!
 
     const opcode: std.wasm.Opcode = buildOpcode(.{
         .op = op,
-        .valtype1 = typeToValtype(ty, pt, func.target),
+        .valtype1 = typeToValtype(ty, pt, cg.target),
         .signedness = if (ty.isSignedInt(zcu)) .signed else .unsigned,
     });
-    try func.emitWValue(lhs);
-    try func.emitWValue(rhs);
+    try cg.emitWValue(lhs);
+    try cg.emitWValue(rhs);
 
-    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return .stack;
 }
 
-fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const pt = func.pt;
+fn binOpBigInt(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const int_info = ty.intInfo(zcu);
     if (int_info.bits > 128) {
-        return func.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
+        return cg.fail("TODO: Implement binary operation for big integers larger than 128 bits", .{});
     }
 
     switch (op) {
-        .mul => return func.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+        .mul => return cg.callIntrinsic("__multi3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
         .div => switch (int_info.signedness) {
-            .signed => return func.callIntrinsic("__divti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
-            .unsigned => return func.callIntrinsic("__udivti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+            .signed => return cg.callIntrinsic("__divti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+            .unsigned => return cg.callIntrinsic("__udivti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
         },
         .rem => switch (int_info.signedness) {
-            .signed => return func.callIntrinsic("__modti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
-            .unsigned => return func.callIntrinsic("__umodti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+            .signed => return cg.callIntrinsic("__modti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
+            .unsigned => return cg.callIntrinsic("__umodti3", &.{ ty.toIntern(), ty.toIntern() }, ty, &.{ lhs, rhs }),
         },
         .shr => switch (int_info.signedness) {
-            .signed => return func.callIntrinsic("__ashrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
-            .unsigned => return func.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+            .signed => return cg.callIntrinsic("__ashrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+            .unsigned => return cg.callIntrinsic("__lshrti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
         },
-        .shl => return func.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
+        .shl => return cg.callIntrinsic("__ashlti3", &.{ ty.toIntern(), .i32_type }, ty, &.{ lhs, rhs }),
         .@"and", .@"or", .xor => {
-            const result = try func.allocStack(ty);
-            try func.emitWValue(result);
-            const lhs_lsb = try func.load(lhs, Type.u64, 0);
-            const rhs_lsb = try func.load(rhs, Type.u64, 0);
-            const op_lsb = try func.binOp(lhs_lsb, rhs_lsb, Type.u64, op);
-            try func.store(.stack, op_lsb, Type.u64, result.offset());
-
-            try func.emitWValue(result);
-            const lhs_msb = try func.load(lhs, Type.u64, 8);
-            const rhs_msb = try func.load(rhs, Type.u64, 8);
-            const op_msb = try func.binOp(lhs_msb, rhs_msb, Type.u64, op);
-            try func.store(.stack, op_msb, Type.u64, result.offset() + 8);
+            const result = try cg.allocStack(ty);
+            try cg.emitWValue(result);
+            const lhs_lsb = try cg.load(lhs, Type.u64, 0);
+            const rhs_lsb = try cg.load(rhs, Type.u64, 0);
+            const op_lsb = try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, op);
+            try cg.store(.stack, op_lsb, Type.u64, result.offset());
+
+            try cg.emitWValue(result);
+            const lhs_msb = try cg.load(lhs, Type.u64, 8);
+            const rhs_msb = try cg.load(rhs, Type.u64, 8);
+            const op_msb = try cg.binOp(lhs_msb, rhs_msb, Type.u64, op);
+            try cg.store(.stack, op_msb, Type.u64, result.offset() + 8);
             return result;
         },
         .add, .sub => {
-            const result = try func.allocStack(ty);
-            var lhs_lsb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
-            defer lhs_lsb.free(func);
-            var rhs_lsb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
-            defer rhs_lsb.free(func);
-            var op_lsb = try (try func.binOp(lhs_lsb, rhs_lsb, Type.u64, op)).toLocal(func, Type.u64);
-            defer op_lsb.free(func);
-
-            const lhs_msb = try func.load(lhs, Type.u64, 8);
-            const rhs_msb = try func.load(rhs, Type.u64, 8);
-            const op_msb = try func.binOp(lhs_msb, rhs_msb, Type.u64, op);
+            const result = try cg.allocStack(ty);
+            var lhs_lsb = try (try cg.load(lhs, Type.u64, 0)).toLocal(cg, Type.u64);
+            defer lhs_lsb.free(cg);
+            var rhs_lsb = try (try cg.load(rhs, Type.u64, 0)).toLocal(cg, Type.u64);
+            defer rhs_lsb.free(cg);
+            var op_lsb = try (try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, op)).toLocal(cg, Type.u64);
+            defer op_lsb.free(cg);
+
+            const lhs_msb = try cg.load(lhs, Type.u64, 8);
+            const rhs_msb = try cg.load(rhs, Type.u64, 8);
+            const op_msb = try cg.binOp(lhs_msb, rhs_msb, Type.u64, op);
 
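             // Carry (add) / borrow (sub) out of the low word: for add, the low
             // result wrapped iff it is smaller than an operand; for sub, a
             // borrow occurred iff lhs < rhs.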
             const lt = if (op == .add) blk: {
-                break :blk try func.cmp(op_lsb, rhs_lsb, Type.u64, .lt);
+                break :blk try cg.cmp(op_lsb, rhs_lsb, Type.u64, .lt);
             } else if (op == .sub) blk: {
-                break :blk try func.cmp(lhs_lsb, rhs_lsb, Type.u64, .lt);
+                break :blk try cg.cmp(lhs_lsb, rhs_lsb, Type.u64, .lt);
             } else unreachable;
-            const tmp = try func.intcast(lt, Type.u32, Type.u64);
-            var tmp_op = try (try func.binOp(op_msb, tmp, Type.u64, op)).toLocal(func, Type.u64);
-            defer tmp_op.free(func);
+            const tmp = try cg.intcast(lt, Type.u32, Type.u64);
+            var tmp_op = try (try cg.binOp(op_msb, tmp, Type.u64, op)).toLocal(cg, Type.u64);
+            defer tmp_op.free(cg);
 
-            try func.store(result, op_lsb, Type.u64, 0);
-            try func.store(result, tmp_op, Type.u64, 8);
+            try cg.store(result, op_lsb, Type.u64, 0);
+            try cg.store(result, tmp_op, Type.u64, 8);
             return result;
         },
-        else => return func.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}),
+        else => return cg.fail("TODO: Implement binary operation for big integers: '{s}'", .{@tagName(op)}),
     }
 }
 
@@ -2806,117 +2804,117 @@ const FloatOp = enum {
     }
 };
 
-fn airAbs(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airAbs(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
-    const ty = func.typeOf(ty_op.operand);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const ty = cg.typeOf(ty_op.operand);
     const scalar_ty = ty.scalarType(zcu);
 
     switch (scalar_ty.zigTypeTag(zcu)) {
         .int => if (ty.zigTypeTag(zcu) == .vector) {
-            return func.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
+            return cg.fail("TODO implement airAbs for {}", .{ty.fmt(pt)});
         } else {
             const int_bits = ty.intInfo(zcu).bits;
             const wasm_bits = toWasmBits(int_bits) orelse {
-                return func.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
+                return cg.fail("TODO: airAbs for signed integers larger than '{d}' bits", .{int_bits});
             };
 
             switch (wasm_bits) {
                 32 => {
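                     // Branchless abs: t = x >> 31 (all ones iff x is negative),
                     // then (x ^ t) - t. The 64-bit case below uses the same trick.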
-                    try func.emitWValue(operand);
+                    try cg.emitWValue(operand);
 
-                    try func.addImm32(31);
-                    try func.addTag(.i32_shr_s);
+                    try cg.addImm32(31);
+                    try cg.addTag(.i32_shr_s);
 
-                    var tmp = try func.allocLocal(ty);
-                    defer tmp.free(func);
-                    try func.addLabel(.local_tee, tmp.local.value);
+                    var tmp = try cg.allocLocal(ty);
+                    defer tmp.free(cg);
+                    try cg.addLabel(.local_tee, tmp.local.value);
 
-                    try func.emitWValue(operand);
-                    try func.addTag(.i32_xor);
-                    try func.emitWValue(tmp);
-                    try func.addTag(.i32_sub);
-                    return func.finishAir(inst, .stack, &.{ty_op.operand});
+                    try cg.emitWValue(operand);
+                    try cg.addTag(.i32_xor);
+                    try cg.emitWValue(tmp);
+                    try cg.addTag(.i32_sub);
+                    return cg.finishAir(inst, .stack, &.{ty_op.operand});
                 },
                 64 => {
-                    try func.emitWValue(operand);
+                    try cg.emitWValue(operand);
 
-                    try func.addImm64(63);
-                    try func.addTag(.i64_shr_s);
+                    try cg.addImm64(63);
+                    try cg.addTag(.i64_shr_s);
 
-                    var tmp = try func.allocLocal(ty);
-                    defer tmp.free(func);
-                    try func.addLabel(.local_tee, tmp.local.value);
+                    var tmp = try cg.allocLocal(ty);
+                    defer tmp.free(cg);
+                    try cg.addLabel(.local_tee, tmp.local.value);
 
-                    try func.emitWValue(operand);
-                    try func.addTag(.i64_xor);
-                    try func.emitWValue(tmp);
-                    try func.addTag(.i64_sub);
-                    return func.finishAir(inst, .stack, &.{ty_op.operand});
+                    try cg.emitWValue(operand);
+                    try cg.addTag(.i64_xor);
+                    try cg.emitWValue(tmp);
+                    try cg.addTag(.i64_sub);
+                    return cg.finishAir(inst, .stack, &.{ty_op.operand});
                 },
                 128 => {
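                     // Same xor/sub trick widened to 128 bits: replicate the sign
                     // word into both halves of `mask`, then (x ^ mask) - mask.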
-                    const mask = try func.allocStack(Type.u128);
-                    try func.emitWValue(mask);
-                    try func.emitWValue(mask);
+                    const mask = try cg.allocStack(Type.u128);
+                    try cg.emitWValue(mask);
+                    try cg.emitWValue(mask);
 
-                    _ = try func.load(operand, Type.u64, 8);
-                    try func.addImm64(63);
-                    try func.addTag(.i64_shr_s);
+                    _ = try cg.load(operand, Type.u64, 8);
+                    try cg.addImm64(63);
+                    try cg.addTag(.i64_shr_s);
 
-                    var tmp = try func.allocLocal(Type.u64);
-                    defer tmp.free(func);
-                    try func.addLabel(.local_tee, tmp.local.value);
-                    try func.store(.stack, .stack, Type.u64, mask.offset() + 0);
-                    try func.emitWValue(tmp);
-                    try func.store(.stack, .stack, Type.u64, mask.offset() + 8);
+                    var tmp = try cg.allocLocal(Type.u64);
+                    defer tmp.free(cg);
+                    try cg.addLabel(.local_tee, tmp.local.value);
+                    try cg.store(.stack, .stack, Type.u64, mask.offset() + 0);
+                    try cg.emitWValue(tmp);
+                    try cg.store(.stack, .stack, Type.u64, mask.offset() + 8);
 
-                    const a = try func.binOpBigInt(operand, mask, Type.u128, .xor);
-                    const b = try func.binOpBigInt(a, mask, Type.u128, .sub);
+                    const a = try cg.binOpBigInt(operand, mask, Type.u128, .xor);
+                    const b = try cg.binOpBigInt(a, mask, Type.u128, .sub);
 
-                    return func.finishAir(inst, b, &.{ty_op.operand});
+                    return cg.finishAir(inst, b, &.{ty_op.operand});
                 },
                 else => unreachable,
             }
         },
         .float => {
-            const result = try func.floatOp(.fabs, ty, &.{operand});
-            return func.finishAir(inst, result, &.{ty_op.operand});
+            const result = try cg.floatOp(.fabs, ty, &.{operand});
+            return cg.finishAir(inst, result, &.{ty_op.operand});
         },
         else => unreachable,
     }
 }
 
-fn airUnaryFloatOp(func: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const ty = func.typeOf(un_op);
+fn airUnaryFloatOp(cg: *CodeGen, inst: Air.Inst.Index, op: FloatOp) InnerError!void {
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const ty = cg.typeOf(un_op);
 
-    const result = try func.floatOp(op, ty, &.{operand});
-    return func.finishAir(inst, result, &.{un_op});
+    const result = try cg.floatOp(op, ty, &.{operand});
+    return cg.finishAir(inst, result, &.{un_op});
 }
 
-fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
-    const pt = func.pt;
+fn floatOp(cg: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement floatOps for vectors", .{});
+        return cg.fail("TODO: Implement floatOps for vectors", .{});
     }
 
-    const float_bits = ty.floatBits(func.target.*);
+    const float_bits = ty.floatBits(cg.target.*);
 
     if (float_op == .neg) {
-        return func.floatNeg(ty, args[0]);
+        return cg.floatNeg(ty, args[0]);
     }
 
     if (float_bits == 32 or float_bits == 64) {
         if (float_op.toOp()) |op| {
             for (args) |operand| {
-                try func.emitWValue(operand);
+                try cg.emitWValue(operand);
             }
-            const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, func.target) });
-            try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+            const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, pt, cg.target) });
+            try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
             return .stack;
         }
     }
@@ -2958,45 +2956,45 @@ fn floatOp(func: *CodeGen, float_op: FloatOp, ty: Type, args: []const WValue) In
     // fma requires three operands
     var param_types_buffer: [3]InternPool.Index = .{ ty.ip_index, ty.ip_index, ty.ip_index };
     const param_types = param_types_buffer[0..args.len];
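     // No native wasm opcode covers this op/width, so lower it to a call to
     // the matching compiler-rt/libm-style helper.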
-    return func.callIntrinsic(fn_name, param_types, ty, args);
+    return cg.callIntrinsic(fn_name, param_types, ty, args);
 }
 
 /// NOTE: The result value remains on top of the stack.
-fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
-    const float_bits = ty.floatBits(func.target.*);
+fn floatNeg(cg: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
+    const float_bits = ty.floatBits(cg.target.*);
     switch (float_bits) {
         16 => {
-            try func.emitWValue(arg);
-            try func.addImm32(0x8000);
-            try func.addTag(.i32_xor);
+            try cg.emitWValue(arg);
+            try cg.addImm32(0x8000);
+            try cg.addTag(.i32_xor);
             return .stack;
         },
         32, 64 => {
-            try func.emitWValue(arg);
+            try cg.emitWValue(arg);
             const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64;
             const opcode = buildOpcode(.{ .op = .neg, .valtype1 = val_type });
-            try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+            try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
             return .stack;
         },
         80, 128 => {
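             // f80/f128 have no wasm opcodes; copy the value into a stack slot
             // and flip the sign bit of the upper word with i64.xor.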
-            const result = try func.allocStack(ty);
-            try func.emitWValue(result);
-            try func.emitWValue(arg);
-            try func.addMemArg(.i64_load, .{ .offset = 0 + arg.offset(), .alignment = 2 });
-            try func.addMemArg(.i64_store, .{ .offset = 0 + result.offset(), .alignment = 2 });
+            const result = try cg.allocStack(ty);
+            try cg.emitWValue(result);
+            try cg.emitWValue(arg);
+            try cg.addMemArg(.i64_load, .{ .offset = 0 + arg.offset(), .alignment = 2 });
+            try cg.addMemArg(.i64_store, .{ .offset = 0 + result.offset(), .alignment = 2 });
 
-            try func.emitWValue(result);
-            try func.emitWValue(arg);
-            try func.addMemArg(.i64_load, .{ .offset = 8 + arg.offset(), .alignment = 2 });
+            try cg.emitWValue(result);
+            try cg.emitWValue(arg);
+            try cg.addMemArg(.i64_load, .{ .offset = 8 + arg.offset(), .alignment = 2 });
 
             if (float_bits == 80) {
-                try func.addImm64(0x8000);
-                try func.addTag(.i64_xor);
-                try func.addMemArg(.i64_store16, .{ .offset = 8 + result.offset(), .alignment = 2 });
+                try cg.addImm64(0x8000);
+                try cg.addTag(.i64_xor);
+                try cg.addMemArg(.i64_store16, .{ .offset = 8 + result.offset(), .alignment = 2 });
             } else {
-                try func.addImm64(0x8000000000000000);
-                try func.addTag(.i64_xor);
-                try func.addMemArg(.i64_store, .{ .offset = 8 + result.offset(), .alignment = 2 });
+                try cg.addImm64(0x8000000000000000);
+                try cg.addTag(.i64_xor);
+                try cg.addMemArg(.i64_store, .{ .offset = 8 + result.offset(), .alignment = 2 });
             }
             return result;
         },
@@ -3004,18 +3002,18 @@ fn floatNeg(func: *CodeGen, ty: Type, arg: WValue) InnerError!WValue {
     }
 }
 
-fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const pt = func.pt;
+fn airWrapBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
-    const lhs_ty = func.typeOf(bin_op.lhs);
-    const rhs_ty = func.typeOf(bin_op.rhs);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
+    const lhs_ty = cg.typeOf(bin_op.lhs);
+    const rhs_ty = cg.typeOf(bin_op.rhs);
 
     if (lhs_ty.zigTypeTag(zcu) == .vector or rhs_ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
+        return cg.fail("TODO: Implement wrapping arithmetic for vectors", .{});
     }
 
     // For certain operations, such as shifting, the types are different.
@@ -3026,90 +3024,90 @@ fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     const result = switch (op) {
         .shr, .shl => result: {
             const lhs_wasm_bits = toWasmBits(@intCast(lhs_ty.bitSize(zcu))) orelse {
-                return func.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
+                return cg.fail("TODO: implement '{s}' for types larger than 128 bits", .{@tagName(op)});
             };
             const rhs_wasm_bits = toWasmBits(@intCast(rhs_ty.bitSize(zcu))).?;
             const new_rhs = if (lhs_wasm_bits != rhs_wasm_bits and lhs_wasm_bits != 128)
-                try (try func.intcast(rhs, rhs_ty, lhs_ty)).toLocal(func, lhs_ty)
+                try (try cg.intcast(rhs, rhs_ty, lhs_ty)).toLocal(cg, lhs_ty)
             else
                 rhs;
-            break :result try func.wrapBinOp(lhs, new_rhs, lhs_ty, op);
+            break :result try cg.wrapBinOp(lhs, new_rhs, lhs_ty, op);
         },
-        else => try func.wrapBinOp(lhs, rhs, lhs_ty, op),
+        else => try cg.wrapBinOp(lhs, rhs, lhs_ty, op),
     };
 
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Performs a wrapping binary operation.
 /// Asserts rhs is not a stack value when lhs also isn't.
 /// NOTE: Leaves the result on the stack when its Type is <= 64 bits
-fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const bin_local = try func.binOp(lhs, rhs, ty, op);
-    return func.wrapOperand(bin_local, ty);
+fn wrapBinOp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+    const bin_local = try cg.binOp(lhs, rhs, ty, op);
+    return cg.wrapOperand(bin_local, ty);
 }
 
 /// Wraps an operand based on a given type's bitsize.
 /// Asserts `Type` is <= 128 bits.
 /// NOTE: When the Type is <= 64 bits and wrapping was needed, leaves the value on top of the stack.
-fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
-    const pt = func.pt;
+fn wrapOperand(cg: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     assert(ty.abiSize(zcu) <= 16);
     const int_bits: u16 = @intCast(ty.bitSize(zcu)); // TODO use ty.intInfo(zcu).bits
     const wasm_bits = toWasmBits(int_bits) orelse {
-        return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
+        return cg.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{int_bits});
     };
 
     if (wasm_bits == int_bits) return operand;
 
     switch (wasm_bits) {
         32 => {
-            try func.emitWValue(operand);
+            try cg.emitWValue(operand);
             if (ty.isSignedInt(zcu)) {
-                try func.addImm32(32 - int_bits);
-                try func.addTag(.i32_shl);
-                try func.addImm32(32 - int_bits);
-                try func.addTag(.i32_shr_s);
+                try cg.addImm32(32 - int_bits);
+                try cg.addTag(.i32_shl);
+                try cg.addImm32(32 - int_bits);
+                try cg.addTag(.i32_shr_s);
             } else {
-                try func.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits));
-                try func.addTag(.i32_and);
+                try cg.addImm32(~@as(u32, 0) >> @intCast(32 - int_bits));
+                try cg.addTag(.i32_and);
             }
             return .stack;
         },
         64 => {
-            try func.emitWValue(operand);
+            try cg.emitWValue(operand);
             if (ty.isSignedInt(zcu)) {
-                try func.addImm64(64 - int_bits);
-                try func.addTag(.i64_shl);
-                try func.addImm64(64 - int_bits);
-                try func.addTag(.i64_shr_s);
+                try cg.addImm64(64 - int_bits);
+                try cg.addTag(.i64_shl);
+                try cg.addImm64(64 - int_bits);
+                try cg.addTag(.i64_shr_s);
             } else {
-                try func.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits));
-                try func.addTag(.i64_and);
+                try cg.addImm64(~@as(u64, 0) >> @intCast(64 - int_bits));
+                try cg.addTag(.i64_and);
             }
             return .stack;
         },
         128 => {
             assert(operand != .stack);
-            const result = try func.allocStack(ty);
+            const result = try cg.allocStack(ty);
 
-            try func.emitWValue(result);
-            _ = try func.load(operand, Type.u64, 0);
-            try func.store(.stack, .stack, Type.u64, result.offset());
+            try cg.emitWValue(result);
+            _ = try cg.load(operand, Type.u64, 0);
+            try cg.store(.stack, .stack, Type.u64, result.offset());
 
-            try func.emitWValue(result);
-            _ = try func.load(operand, Type.u64, 8);
+            try cg.emitWValue(result);
+            _ = try cg.load(operand, Type.u64, 8);
             if (ty.isSignedInt(zcu)) {
-                try func.addImm64(128 - int_bits);
-                try func.addTag(.i64_shl);
-                try func.addImm64(128 - int_bits);
-                try func.addTag(.i64_shr_s);
+                try cg.addImm64(128 - int_bits);
+                try cg.addTag(.i64_shl);
+                try cg.addImm64(128 - int_bits);
+                try cg.addTag(.i64_shr_s);
             } else {
-                try func.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits));
-                try func.addTag(.i64_and);
+                try cg.addImm64(~@as(u64, 0) >> @intCast(128 - int_bits));
+                try cg.addTag(.i64_and);
             }
-            try func.store(.stack, .stack, Type.u64, result.offset() + 8);
+            try cg.store(.stack, .stack, Type.u64, result.offset() + 8);
 
             return result;
         },
@@ -3117,17 +3115,17 @@ fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
     }
 }
 
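 /// Lowers a pointer constant by recursively walking its base address,
 /// accumulating byte offsets along the way. For instance, a pointer to the
 /// `len` field of a slice is lowered by adding the pointer-width field
 /// offset to the lowered base pointer.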
-fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
-    const pt = func.pt;
+fn lowerPtr(cg: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ptr = zcu.intern_pool.indexToKey(ptr_val).ptr;
     const offset: u64 = prev_offset + ptr.byte_offset;
     return switch (ptr.base_addr) {
         .nav => |nav| return .{ .nav_ref = .{ .nav_index = zcu.chaseNav(nav), .offset = @intCast(offset) } },
         .uav => |uav| return .{ .uav_ref = .{ .ip_index = uav.val, .offset = @intCast(offset) } },
-        .int => return func.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
-        .eu_payload => return func.fail("Wasm TODO: lower error union payload pointer", .{}),
-        .opt_payload => |opt_ptr| return func.lowerPtr(opt_ptr, offset),
+        .int => return cg.lowerConstant(try pt.intValue(Type.usize, offset), Type.usize),
+        .eu_payload => return cg.fail("Wasm TODO: lower error union payload pointer", .{}),
+        .opt_payload => |opt_ptr| return cg.lowerPtr(opt_ptr, offset),
         .field => |field| {
             const base_ptr = Value.fromInterned(field.base);
             const base_ty = base_ptr.typeOf(zcu).childType(zcu);
@@ -3136,7 +3134,7 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
                     assert(base_ty.isSlice(zcu));
                     break :off switch (field.index) {
                         Value.slice_ptr_index => 0,
-                        Value.slice_len_index => @divExact(func.target.ptrBitWidth(), 8),
+                        Value.slice_len_index => @divExact(cg.target.ptrBitWidth(), 8),
                         else => unreachable,
                     };
                 },
@@ -3162,19 +3160,19 @@ fn lowerPtr(func: *CodeGen, ptr_val: InternPool.Index, prev_offset: u64) InnerEr
                 },
                 else => unreachable,
             };
-            return func.lowerPtr(field.base, offset + field_off);
+            return cg.lowerPtr(field.base, offset + field_off);
         },
         .arr_elem, .comptime_field, .comptime_alloc => unreachable,
     };
 }
 
 /// Asserts that `isByRef` returns `false` for `ty`.
-fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
-    const pt = func.pt;
+fn lowerConstant(cg: *CodeGen, val: Value, ty: Type) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    assert(!isByRef(ty, pt, func.target));
+    assert(!isByRef(ty, pt, cg.target));
     const ip = &zcu.intern_pool;
-    if (val.isUndefDeep(zcu)) return func.emitUndefined(ty);
+    if (val.isUndefDeep(zcu)) return cg.emitUndefined(ty);
 
     switch (ip.indexToKey(val.ip_index)) {
         .int_type,
@@ -3253,14 +3251,14 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             const payload_type = ty.errorUnionPayload(zcu);
             if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
                 // We use the error type directly as the type.
-                return func.lowerConstant(err_val, err_ty);
+                return cg.lowerConstant(err_val, err_ty);
             }
 
-            return func.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
+            return cg.fail("Wasm TODO: lowerConstant error union with non-zero-bit payload type", .{});
         },
         .enum_tag => |enum_tag| {
             const int_tag_ty = ip.typeOf(enum_tag.int);
-            return func.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty));
+            return cg.lowerConstant(Value.fromInterned(enum_tag.int), Type.fromInterned(int_tag_ty));
         },
         .float => |float| switch (float.storage) {
             .f16 => |f16_val| return .{ .imm32 = @as(u16, @bitCast(f16_val)) },
@@ -3269,11 +3267,11 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             else => unreachable,
         },
         .slice => unreachable, // isByRef == true
-        .ptr => return func.lowerPtr(val.toIntern(), 0),
+        .ptr => return cg.lowerPtr(val.toIntern(), 0),
         .opt => if (ty.optionalReprIsPayload(zcu)) {
             const pl_ty = ty.optionalChild(zcu);
             if (val.optionalValue(zcu)) |payload| {
-                return func.lowerConstant(payload, pl_ty);
+                return cg.lowerConstant(payload, pl_ty);
             } else {
                 return .{ .imm32 = 0 };
             }
@@ -3281,12 +3279,12 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
             return .{ .imm32 = @intFromBool(!val.isNull(zcu)) };
         },
         .aggregate => switch (ip.indexToKey(ty.ip_index)) {
-            .array_type => return func.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
+            .array_type => return cg.fail("Wasm TODO: LowerConstant for {}", .{ty.fmt(pt)}),
             .vector_type => {
-                assert(determineSimdStoreStrategy(ty, zcu, func.target) == .direct);
+                assert(determineSimdStoreStrategy(ty, zcu, cg.target) == .direct);
                 var buf: [16]u8 = undefined;
                 val.writeToMemory(pt, &buf) catch unreachable;
-                return func.storeSimdImmd(buf);
+                return cg.storeSimdImmd(buf);
             },
             .struct_type => {
                 const struct_type = ip.loadStructType(ty.toIntern());
@@ -3300,7 +3298,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
                     backing_int_ty,
                     mem.readInt(u64, &buf, .little),
                 );
-                return func.lowerConstant(int_val, backing_int_ty);
+                return cg.lowerConstant(int_val, backing_int_ty);
             },
             else => unreachable,
         },
@@ -3313,7 +3311,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
                 const field_index = zcu.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
                 break :field_ty Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
             };
-            return func.lowerConstant(Value.fromInterned(un.val), constant_ty);
+            return cg.lowerConstant(Value.fromInterned(un.val), constant_ty);
         },
         .memoized_call => unreachable,
     }
@@ -3321,14 +3319,14 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
 
 /// Stores the value as a 128-bit immediate by appending it to the
 /// `simd_immediates` list and returning the index into that list as a `WValue`.
-fn storeSimdImmd(func: *CodeGen, value: [16]u8) !WValue {
-    const index = @as(u32, @intCast(func.simd_immediates.items.len));
-    try func.simd_immediates.append(func.gpa, value);
+fn storeSimdImmd(cg: *CodeGen, value: [16]u8) !WValue {
+    const index = @as(u32, @intCast(cg.simd_immediates.items.len));
+    try cg.simd_immediates.append(cg.gpa, value);
     return .{ .imm128 = index };
 }
 
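 /// Emits the bit pattern used for `undefined` values: the repeating byte
 /// 0xaa, the same pattern Zig uses for undefined memory elsewhere. Note
 /// that an undefined `f16` is returned as an `imm32`, since wasm has no
 /// 16-bit float value type.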
-fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
-    const pt = func.pt;
+fn emitUndefined(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
     switch (ty.zigTypeTag(zcu)) {
@@ -3338,20 +3336,20 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
             33...64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
             else => unreachable,
         },
-        .float => switch (ty.floatBits(func.target.*)) {
+        .float => switch (ty.floatBits(cg.target.*)) {
             16 => return .{ .imm32 = 0xaaaaaaaa },
             32 => return .{ .float32 = @as(f32, @bitCast(@as(u32, 0xaaaaaaaa))) },
             64 => return .{ .float64 = @as(f64, @bitCast(@as(u64, 0xaaaaaaaaaaaaaaaa))) },
             else => unreachable,
         },
-        .pointer => switch (func.ptr_size) {
+        .pointer => switch (cg.ptr_size) {
             .wasm32 => return .{ .imm32 = 0xaaaaaaaa },
             .wasm64 => return .{ .imm64 = 0xaaaaaaaaaaaaaaaa },
         },
         .optional => {
             const pl_ty = ty.optionalChild(zcu);
             if (ty.optionalReprIsPayload(zcu)) {
-                return func.emitUndefined(pl_ty);
+                return cg.emitUndefined(pl_ty);
             }
             return .{ .imm32 = 0xaaaaaaaa };
         },
@@ -3360,17 +3358,17 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
         },
         .@"struct" => {
             const packed_struct = zcu.typeToPackedStruct(ty).?;
-            return func.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
+            return cg.emitUndefined(Type.fromInterned(packed_struct.backingIntTypeUnordered(ip)));
         },
-        else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
+        else => return cg.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag(zcu)}),
     }
 }
 
 /// Returns a `Value` as a signed 32-bit value.
 /// It's illegal to provide a value with a type that cannot be represented
 /// as an integer value.
-fn valueAsI32(func: *const CodeGen, val: Value) i32 {
-    const pt = func.pt;
+fn valueAsI32(cg: *const CodeGen, val: Value) i32 {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
 
@@ -3405,132 +3403,132 @@ fn intStorageAsI32(storage: InternPool.Key.Int.Storage, pt: Zcu.PerThread) i32 {
     };
 }
 
-fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.Block, ty_pl.payload);
-    try func.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
+fn airBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.Block, ty_pl.payload);
+    try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
 }
 
-fn lowerBlock(func: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
-    const wasm_block_ty = genBlockType(block_ty, pt, func.target);
+fn lowerBlock(cg: *CodeGen, inst: Air.Inst.Index, block_ty: Type, body: []const Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
+    const wasm_block_ty = genBlockType(block_ty, pt, cg.target);
 
     // if wasm_block_ty is non-empty, we create a local to store the temporary value
     const block_result: WValue = if (wasm_block_ty != std.wasm.block_empty) blk: {
-        const ty: Type = if (isByRef(block_ty, pt, func.target)) Type.u32 else block_ty;
-        break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
+        const ty: Type = if (isByRef(block_ty, pt, cg.target)) Type.u32 else block_ty;
+        break :blk try cg.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
     } else .none;
 
-    try func.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
     // Here we set the current block idx, so breaks know the depth to jump
     // to when breaking out.
-    try func.blocks.putNoClobber(func.gpa, inst, .{
-        .label = func.block_depth,
+    try cg.blocks.putNoClobber(cg.gpa, inst, .{
+        .label = cg.block_depth,
         .value = block_result,
     });
 
-    try func.genBody(body);
-    try func.endBlock();
+    try cg.genBody(body);
+    try cg.endBlock();
 
-    const liveness = func.liveness.getBlock(inst);
-    try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths.len);
+    const liveness = cg.liveness.getBlock(inst);
+    try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths.len);
 
-    return func.finishAir(inst, block_result, &.{});
+    return cg.finishAir(inst, block_result, &.{});
 }
 
 /// Appends a new wasm block to the code section and increases the `block_depth` by 1
-fn startBlock(func: *CodeGen, block_tag: std.wasm.Opcode, valtype: u8) !void {
-    func.block_depth += 1;
-    try func.addInst(.{
+fn startBlock(cg: *CodeGen, block_tag: std.wasm.Opcode, valtype: u8) !void {
+    cg.block_depth += 1;
+    try cg.addInst(.{
         .tag = Mir.Inst.Tag.fromOpcode(block_tag),
         .data = .{ .block_type = valtype },
     });
 }
 
 /// Ends the current wasm block and decreases the `block_depth` by 1
-fn endBlock(func: *CodeGen) !void {
-    try func.addTag(.end);
-    func.block_depth -= 1;
+fn endBlock(cg: *CodeGen) !void {
+    try cg.addTag(.end);
+    cg.block_depth -= 1;
 }
 
-fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const loop = func.air.extraData(Air.Block, ty_pl.payload);
-    const body: []const Air.Inst.Index = @ptrCast(func.air.extra[loop.end..][0..loop.data.body_len]);
+fn airLoop(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const loop = cg.air.extraData(Air.Block, ty_pl.payload);
+    const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[loop.end..][0..loop.data.body_len]);
 
     // result type of loop is always 'noreturn', meaning we can always
     // emit the wasm type 'block_empty'.
-    try func.startBlock(.loop, std.wasm.block_empty);
+    try cg.startBlock(.loop, std.wasm.block_empty);
 
-    try func.loops.putNoClobber(func.gpa, inst, func.block_depth);
-    defer assert(func.loops.remove(inst));
+    try cg.loops.putNoClobber(cg.gpa, inst, cg.block_depth);
+    defer assert(cg.loops.remove(inst));
 
-    try func.genBody(body);
-    try func.endBlock();
+    try cg.genBody(body);
+    try cg.endBlock();
 
-    return func.finishAir(inst, .none, &.{});
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const condition = try func.resolveInst(pl_op.operand);
-    const extra = func.air.extraData(Air.CondBr, pl_op.payload);
-    const then_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.then_body_len]);
-    const else_body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
-    const liveness_condbr = func.liveness.getCondBr(inst);
+fn airCondBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const condition = try cg.resolveInst(pl_op.operand);
+    const extra = cg.air.extraData(Air.CondBr, pl_op.payload);
+    const then_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.then_body_len]);
+    const else_body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+    const liveness_condbr = cg.liveness.getCondBr(inst);
 
     // result type is always noreturn, so use `block_empty` as type.
-    try func.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
     // emit the conditional value
-    try func.emitWValue(condition);
+    try cg.emitWValue(condition);
 
     // we insert the block before emitting the condition;
     // br_if then breaks out of this block when the condition is true,
     // skipping the else body and continuing with the then codepath
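     // (roughly: `block .. <condition> .. br_if 0 .. <else body> .. end .. <then body>`)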
-    try func.addLabel(.br_if, 0);
+    try cg.addLabel(.br_if, 0);
 
-    try func.branches.ensureUnusedCapacity(func.gpa, 2);
+    try cg.branches.ensureUnusedCapacity(cg.gpa, 2);
     {
-        func.branches.appendAssumeCapacity(.{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len)));
+        cg.branches.appendAssumeCapacity(.{});
+        try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, @as(u32, @intCast(liveness_condbr.else_deaths.len)));
         defer {
-            var else_stack = func.branches.pop();
-            else_stack.deinit(func.gpa);
+            var else_stack = cg.branches.pop();
+            else_stack.deinit(cg.gpa);
         }
-        try func.genBody(else_body);
-        try func.endBlock();
+        try cg.genBody(else_body);
+        try cg.endBlock();
     }
 
     // Outer block that matches the condition
     {
-        func.branches.appendAssumeCapacity(.{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len)));
+        cg.branches.appendAssumeCapacity(.{});
+        try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, @as(u32, @intCast(liveness_condbr.then_deaths.len)));
         defer {
-            var then_stack = func.branches.pop();
-            then_stack.deinit(func.gpa);
+            var then_stack = cg.branches.pop();
+            then_stack.deinit(cg.gpa);
         }
-        try func.genBody(then_body);
+        try cg.genBody(then_body);
     }
 
-    return func.finishAir(inst, .none, &.{});
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airCmp(cg: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
-    const operand_ty = func.typeOf(bin_op.lhs);
-    const result = try func.cmp(lhs, rhs, operand_ty, op);
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
+    const operand_ty = cg.typeOf(bin_op.lhs);
+    const result = try cg.cmp(lhs, rhs, operand_ty, op);
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Compares two operands.
 /// Asserts rhs is not a stack value when the lhs isn't a stack value either
 /// NOTE: This leaves the result on top of the stack, rather than a new local.
-fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+fn cmp(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     assert(!(lhs != .stack and rhs == .stack));
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (ty.zigTypeTag(zcu) == .optional and !ty.optionalReprIsPayload(zcu)) {
         const payload_ty = ty.optionalChild(zcu);
@@ -3538,12 +3536,12 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
             // When we hit this case, we must compare optionals that are
             // not pointer-like. This means first checking both lhs and rhs
             // against non-null, and then checking that their payloads match
-            return func.cmpOptionals(lhs, rhs, ty, op);
+            return cg.cmpOptionals(lhs, rhs, ty, op);
         }
     } else if (ty.isAnyFloat()) {
-        return func.cmpFloat(ty, lhs, rhs, op);
-    } else if (isByRef(ty, pt, func.target)) {
-        return func.cmpBigInt(lhs, rhs, ty, op);
+        return cg.cmpFloat(ty, lhs, rhs, op);
+    } else if (isByRef(ty, pt, cg.target)) {
+        return cg.cmpBigInt(lhs, rhs, ty, op);
     }
 
     const signedness: std.builtin.Signedness = blk: {
@@ -3556,11 +3554,11 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
 
     // ensure that when we compare pointers, we emit
     // the true pointer of a stack value, rather than the stack pointer.
-    try func.lowerToStack(lhs);
-    try func.lowerToStack(rhs);
+    try cg.lowerToStack(lhs);
+    try cg.lowerToStack(rhs);
 
     const opcode: std.wasm.Opcode = buildOpcode(.{
-        .valtype1 = typeToValtype(ty, pt, func.target),
+        .valtype1 = typeToValtype(ty, pt, cg.target),
         .op = switch (op) {
             .lt => .lt,
             .lte => .le,
@@ -3571,15 +3569,15 @@ fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareO
         },
         .signedness = signedness,
     });
-    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return .stack;
 }
 
 /// Compares two floats.
 /// NOTE: Leaves the result of the comparison on top of the stack.
-fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
-    const float_bits = ty.floatBits(func.target.*);
+fn cmpFloat(cg: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math.CompareOperator) InnerError!WValue {
+    const float_bits = ty.floatBits(cg.target.*);
 
     const op: Op = switch (cmp_op) {
         .lt => .lt,
@@ -3592,18 +3590,18 @@ fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math
 
     switch (float_bits) {
         16 => {
-            _ = try func.fpext(lhs, Type.f16, Type.f32);
-            _ = try func.fpext(rhs, Type.f16, Type.f32);
+            _ = try cg.fpext(lhs, Type.f16, Type.f32);
+            _ = try cg.fpext(rhs, Type.f16, Type.f32);
             const opcode = buildOpcode(.{ .op = op, .valtype1 = .f32 });
-            try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+            try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
             return .stack;
         },
         32, 64 => {
-            try func.emitWValue(lhs);
-            try func.emitWValue(rhs);
+            try cg.emitWValue(lhs);
+            try cg.emitWValue(rhs);
             const val_type: std.wasm.Valtype = if (float_bits == 32) .f32 else .f64;
             const opcode = buildOpcode(.{ .op = op, .valtype1 = val_type });
-            try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+            try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
             return .stack;
         },
         80, 128 => {
@@ -3612,121 +3610,121 @@ fn cmpFloat(func: *CodeGen, ty: Type, lhs: WValue, rhs: WValue, cmp_op: std.math
                 @tagName(op), target_util.compilerRtFloatAbbrev(float_bits),
             }) catch unreachable;
 
-            const result = try func.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, Type.bool, &.{ lhs, rhs });
-            return func.cmp(result, .{ .imm32 = 0 }, Type.i32, cmp_op);
+            const result = try cg.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, Type.bool, &.{ lhs, rhs });
+            return cg.cmp(result, .{ .imm32 = 0 }, Type.i32, cmp_op);
         },
         else => unreachable,
     }
 }
 
-fn airCmpVector(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+fn airCmpVector(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     _ = inst;
-    return func.fail("TODO implement airCmpVector for wasm", .{});
+    return cg.fail("TODO implement airCmpVector for wasm", .{});
 }
 
-fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
+fn airCmpLtErrorsLen(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
 
-    try func.emitWValue(operand);
-    const pt = func.pt;
+    try cg.emitWValue(operand);
+    const pt = cg.pt;
     const err_int_ty = try pt.errorIntType();
-    try func.addTag(.errors_len);
-    const result = try func.cmp(.stack, .stack, err_int_ty, .lt);
+    try cg.addTag(.errors_len);
+    const result = try cg.cmp(.stack, .stack, err_int_ty, .lt);
 
-    return func.finishAir(inst, result, &.{un_op});
+    return cg.finishAir(inst, result, &.{un_op});
 }
 
-fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const zcu = func.pt.zcu;
-    const br = func.air.instructions.items(.data)[@intFromEnum(inst)].br;
-    const block = func.blocks.get(br.block_inst).?;
+fn airBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const zcu = cg.pt.zcu;
+    const br = cg.air.instructions.items(.data)[@intFromEnum(inst)].br;
+    const block = cg.blocks.get(br.block_inst).?;
 
     // if operand has codegen bits we should break with a value
-    if (func.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) {
-        const operand = try func.resolveInst(br.operand);
-        try func.lowerToStack(operand);
+    if (cg.typeOf(br.operand).hasRuntimeBitsIgnoreComptime(zcu)) {
+        const operand = try cg.resolveInst(br.operand);
+        try cg.lowerToStack(operand);
 
         if (block.value != .none) {
-            try func.addLabel(.local_set, block.value.local.value);
+            try cg.addLabel(.local_set, block.value.local.value);
         }
     }
 
     // We map every block to its block index.
     // We then determine how far we have to jump to it by subtracting its label from the current block depth
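     // e.g. with block_depth 5 and a block registered at label 3,
     // idx is 2, so we emit `br 2` to target that block.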
-    const idx: u32 = func.block_depth - block.label;
-    try func.addLabel(.br, idx);
+    const idx: u32 = cg.block_depth - block.label;
+    try cg.addLabel(.br, idx);
 
-    return func.finishAir(inst, .none, &.{br.operand});
+    return cg.finishAir(inst, .none, &.{br.operand});
 }
 
-fn airRepeat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const repeat = func.air.instructions.items(.data)[@intFromEnum(inst)].repeat;
-    const loop_label = func.loops.get(repeat.loop_inst).?;
+fn airRepeat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const repeat = cg.air.instructions.items(.data)[@intFromEnum(inst)].repeat;
+    const loop_label = cg.loops.get(repeat.loop_inst).?;
 
-    const idx: u32 = func.block_depth - loop_label;
-    try func.addLabel(.br, idx);
+    const idx: u32 = cg.block_depth - loop_label;
+    try cg.addLabel(.br, idx);
 
-    return func.finishAir(inst, .none, &.{});
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airNot(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const operand_ty = func.typeOf(ty_op.operand);
-    const pt = func.pt;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const operand_ty = cg.typeOf(ty_op.operand);
+    const pt = cg.pt;
     const zcu = pt.zcu;
 
     const result = result: {
         if (operand_ty.zigTypeTag(zcu) == .bool) {
-            try func.emitWValue(operand);
-            try func.addTag(.i32_eqz);
-            const not_tmp = try func.allocLocal(operand_ty);
-            try func.addLabel(.local_set, not_tmp.local.value);
+            try cg.emitWValue(operand);
+            try cg.addTag(.i32_eqz);
+            const not_tmp = try cg.allocLocal(operand_ty);
+            try cg.addLabel(.local_set, not_tmp.local.value);
             break :result not_tmp;
         } else {
             const int_info = operand_ty.intInfo(zcu);
             const wasm_bits = toWasmBits(int_info.bits) orelse {
-                return func.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
+                return cg.fail("TODO: Implement binary NOT for {}", .{operand_ty.fmt(pt)});
             };
 
             switch (wasm_bits) {
                 32 => {
-                    try func.emitWValue(operand);
-                    try func.addImm32(switch (int_info.signedness) {
+                    try cg.emitWValue(operand);
+                    try cg.addImm32(switch (int_info.signedness) {
                         .unsigned => ~@as(u32, 0) >> @intCast(32 - int_info.bits),
                         .signed => ~@as(u32, 0),
                     });
-                    try func.addTag(.i32_xor);
+                    try cg.addTag(.i32_xor);
                     break :result .stack;
                 },
                 64 => {
-                    try func.emitWValue(operand);
-                    try func.addImm64(switch (int_info.signedness) {
+                    try cg.emitWValue(operand);
+                    try cg.addImm64(switch (int_info.signedness) {
                         .unsigned => ~@as(u64, 0) >> @intCast(64 - int_info.bits),
                         .signed => ~@as(u64, 0),
                     });
-                    try func.addTag(.i64_xor);
+                    try cg.addTag(.i64_xor);
                     break :result .stack;
                 },
                 128 => {
-                    const ptr = try func.allocStack(operand_ty);
+                    const ptr = try cg.allocStack(operand_ty);
 
-                    try func.emitWValue(ptr);
-                    _ = try func.load(operand, Type.u64, 0);
-                    try func.addImm64(~@as(u64, 0));
-                    try func.addTag(.i64_xor);
-                    try func.store(.stack, .stack, Type.u64, ptr.offset());
+                    try cg.emitWValue(ptr);
+                    _ = try cg.load(operand, Type.u64, 0);
+                    try cg.addImm64(~@as(u64, 0));
+                    try cg.addTag(.i64_xor);
+                    try cg.store(.stack, .stack, Type.u64, ptr.offset());
 
-                    try func.emitWValue(ptr);
-                    _ = try func.load(operand, Type.u64, 8);
-                    try func.addImm64(switch (int_info.signedness) {
+                    try cg.emitWValue(ptr);
+                    _ = try cg.load(operand, Type.u64, 8);
+                    try cg.addImm64(switch (int_info.signedness) {
                         .unsigned => ~@as(u64, 0) >> @intCast(128 - int_info.bits),
                         .signed => ~@as(u64, 0),
                     });
-                    try func.addTag(.i64_xor);
-                    try func.store(.stack, .stack, Type.u64, ptr.offset() + 8);
+                    try cg.addTag(.i64_xor);
+                    try cg.store(.stack, .stack, Type.u64, ptr.offset() + 8);
 
                     break :result ptr;
                 },
@@ -3734,33 +3732,33 @@ fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
         }
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airTrap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    try func.addTag(.@"unreachable");
-    return func.finishAir(inst, .none, &.{});
+fn airTrap(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    try cg.addTag(.@"unreachable");
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+fn airBreakpoint(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // unsupported by wasm itself. Can be implemented once we support DWARF
     // for wasm
-    try func.addTag(.@"unreachable");
-    return func.finishAir(inst, .none, &.{});
+    try cg.addTag(.@"unreachable");
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    try func.addTag(.@"unreachable");
-    return func.finishAir(inst, .none, &.{});
+fn airUnreachable(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    try cg.addTag(.@"unreachable");
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airBitcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
-    const wanted_ty = func.typeOfIndex(inst);
-    const given_ty = func.typeOf(ty_op.operand);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const wanted_ty = cg.typeOfIndex(inst);
+    const given_ty = cg.typeOf(ty_op.operand);
 
     const bit_size = given_ty.bitSize(zcu);
     const needs_wrapping = (given_ty.isSignedInt(zcu) != wanted_ty.isSignedInt(zcu)) and
@@ -3768,38 +3766,38 @@ fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
     const result = result: {
         if (given_ty.isAnyFloat() or wanted_ty.isAnyFloat()) {
-            break :result try func.bitcast(wanted_ty, given_ty, operand);
+            break :result try cg.bitcast(wanted_ty, given_ty, operand);
         }
 
-        if (isByRef(given_ty, pt, func.target) and !isByRef(wanted_ty, pt, func.target)) {
-            const loaded_memory = try func.load(operand, wanted_ty, 0);
+        if (isByRef(given_ty, pt, cg.target) and !isByRef(wanted_ty, pt, cg.target)) {
+            const loaded_memory = try cg.load(operand, wanted_ty, 0);
             if (needs_wrapping) {
-                break :result try func.wrapOperand(loaded_memory, wanted_ty);
+                break :result try cg.wrapOperand(loaded_memory, wanted_ty);
             } else {
                 break :result loaded_memory;
             }
         }
-        if (!isByRef(given_ty, pt, func.target) and isByRef(wanted_ty, pt, func.target)) {
-            const stack_memory = try func.allocStack(wanted_ty);
-            try func.store(stack_memory, operand, given_ty, 0);
+        if (!isByRef(given_ty, pt, cg.target) and isByRef(wanted_ty, pt, cg.target)) {
+            const stack_memory = try cg.allocStack(wanted_ty);
+            try cg.store(stack_memory, operand, given_ty, 0);
             if (needs_wrapping) {
-                break :result try func.wrapOperand(stack_memory, wanted_ty);
+                break :result try cg.wrapOperand(stack_memory, wanted_ty);
             } else {
                 break :result stack_memory;
             }
         }
 
         if (needs_wrapping) {
-            break :result try func.wrapOperand(operand, wanted_ty);
+            break :result try cg.wrapOperand(operand, wanted_ty);
         }
 
-        break :result func.reuseOperand(ty_op.operand, operand);
+        break :result cg.reuseOperand(ty_op.operand, operand);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
-    const pt = func.pt;
+fn bitcast(cg: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     // if we bitcast a float to or from an integer we must use the 'reinterpret' instruction
     if (!(wanted_ty.isAnyFloat() or given_ty.isAnyFloat())) return operand;
@@ -3809,41 +3807,41 @@ fn bitcast(func: *CodeGen, wanted_ty: Type, given_ty: Type, operand: WValue) Inn
 
     const opcode = buildOpcode(.{
         .op = .reinterpret,
-        .valtype1 = typeToValtype(wanted_ty, pt, func.target),
-        .valtype2 = typeToValtype(given_ty, pt, func.target),
+        .valtype1 = typeToValtype(wanted_ty, pt, cg.target),
+        .valtype2 = typeToValtype(given_ty, pt, cg.target),
     });
-    try func.emitWValue(operand);
-    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try cg.emitWValue(operand);
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
     return .stack;
 }
 
-fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airStructFieldPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.StructField, ty_pl.payload);
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.StructField, ty_pl.payload);
 
-    const struct_ptr = try func.resolveInst(extra.data.struct_operand);
-    const struct_ptr_ty = func.typeOf(extra.data.struct_operand);
+    const struct_ptr = try cg.resolveInst(extra.data.struct_operand);
+    const struct_ptr_ty = cg.typeOf(extra.data.struct_operand);
     const struct_ty = struct_ptr_ty.childType(zcu);
-    const result = try func.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index);
-    return func.finishAir(inst, result, &.{extra.data.struct_operand});
+    const result = try cg.structFieldPtr(inst, extra.data.struct_operand, struct_ptr, struct_ptr_ty, struct_ty, extra.data.field_index);
+    return cg.finishAir(inst, result, &.{extra.data.struct_operand});
 }
 
-fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
-    const pt = func.pt;
+fn airStructFieldPtrIndex(cg: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const struct_ptr = try func.resolveInst(ty_op.operand);
-    const struct_ptr_ty = func.typeOf(ty_op.operand);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const struct_ptr = try cg.resolveInst(ty_op.operand);
+    const struct_ptr_ty = cg.typeOf(ty_op.operand);
     const struct_ty = struct_ptr_ty.childType(zcu);
 
-    const result = try func.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    const result = try cg.structFieldPtr(inst, ty_op.operand, struct_ptr, struct_ptr_ty, struct_ty, index);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
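 /// Computes a pointer to the struct field at `index`, folding the field's
 /// byte offset into the operand where possible: an offset of zero simply
 /// reuses the operand, and for stack values the offset is folded into the
 /// `stack_offset` itself.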
 fn structFieldPtr(
-    func: *CodeGen,
+    cg: *CodeGen,
     inst: Air.Inst.Index,
     ref: Air.Inst.Ref,
     struct_ptr: WValue,
@@ -3851,9 +3849,9 @@ fn structFieldPtr(
     struct_ty: Type,
     index: u32,
 ) InnerError!WValue {
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const result_ty = func.typeOfIndex(inst);
+    const result_ty = cg.typeOfIndex(inst);
     const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(zcu);
 
     const offset = switch (struct_ty.containerLayout(zcu)) {
@@ -3872,28 +3870,28 @@ fn structFieldPtr(
     };
     // save a load and store when we can simply reuse the operand
     if (offset == 0) {
-        return func.reuseOperand(ref, struct_ptr);
+        return cg.reuseOperand(ref, struct_ptr);
     }
     switch (struct_ptr) {
         .stack_offset => |stack_offset| {
             return .{ .stack_offset = .{ .value = stack_offset.value + @as(u32, @intCast(offset)), .references = 1 } };
         },
-        else => return func.buildPointerOffset(struct_ptr, offset, .new),
+        else => return cg.buildPointerOffset(struct_ptr, offset, .new),
     }
 }
 
-fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
 
-    const struct_ty = func.typeOf(struct_field.struct_operand);
-    const operand = try func.resolveInst(struct_field.struct_operand);
+    const struct_ty = cg.typeOf(struct_field.struct_operand);
+    const operand = try cg.resolveInst(struct_field.struct_operand);
     const field_index = struct_field.field_index;
     const field_ty = struct_ty.fieldType(field_index, zcu);
-    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
+    if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) return cg.finishAir(inst, .none, &.{struct_field.struct_operand});
 
     const result: WValue = switch (struct_ty.containerLayout(zcu)) {
         .@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
@@ -3902,42 +3900,42 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
                 const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
                 const wasm_bits = toWasmBits(backing_ty.intInfo(zcu).bits) orelse {
-                    return func.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
+                    return cg.fail("TODO: airStructFieldVal for packed structs larger than 128 bits", .{});
                 };
                 const const_wvalue: WValue = if (wasm_bits == 32)
                     .{ .imm32 = offset }
                 else if (wasm_bits == 64)
                     .{ .imm64 = offset }
                 else
-                    return func.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{});
+                    return cg.fail("TODO: airStructFieldVal for packed structs larger than 64 bits", .{});
 
                 // for the first field we don't require any shifting
                 const shifted_value = if (offset == 0)
                     operand
                 else
-                    try func.binOp(operand, const_wvalue, backing_ty, .shr);
+                    try cg.binOp(operand, const_wvalue, backing_ty, .shr);
 
                 if (field_ty.zigTypeTag(zcu) == .float) {
                     const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
-                    const truncated = try func.trunc(shifted_value, int_type, backing_ty);
-                    break :result try func.bitcast(field_ty, int_type, truncated);
+                    const truncated = try cg.trunc(shifted_value, int_type, backing_ty);
+                    break :result try cg.bitcast(field_ty, int_type, truncated);
                 } else if (field_ty.isPtrAtRuntime(zcu) and packed_struct.field_types.len == 1) {
                     // In this case we do not have to perform any transformations,
                     // we can simply reuse the operand.
-                    break :result func.reuseOperand(struct_field.struct_operand, operand);
+                    break :result cg.reuseOperand(struct_field.struct_operand, operand);
                 } else if (field_ty.isPtrAtRuntime(zcu)) {
                     const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
-                    break :result try func.trunc(shifted_value, int_type, backing_ty);
+                    break :result try cg.trunc(shifted_value, int_type, backing_ty);
                 }
-                break :result try func.trunc(shifted_value, field_ty, backing_ty);
+                break :result try cg.trunc(shifted_value, field_ty, backing_ty);
             },
             .@"union" => result: {
-                if (isByRef(struct_ty, pt, func.target)) {
-                    if (!isByRef(field_ty, pt, func.target)) {
-                        break :result try func.load(operand, field_ty, 0);
+                if (isByRef(struct_ty, pt, cg.target)) {
+                    if (!isByRef(field_ty, pt, cg.target)) {
+                        break :result try cg.load(operand, field_ty, 0);
                     } else {
-                        const new_stack_val = try func.allocStack(field_ty);
-                        try func.store(new_stack_val, operand, field_ty, 0);
+                        const new_stack_val = try cg.allocStack(field_ty);
+                        try cg.store(new_stack_val, operand, field_ty, 0);
                         break :result new_stack_val;
                     }
                 }
@@ -3945,45 +3943,45 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(struct_ty.bitSize(zcu))));
                 if (field_ty.zigTypeTag(zcu) == .float) {
                     const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
-                    const truncated = try func.trunc(operand, int_type, union_int_type);
-                    break :result try func.bitcast(field_ty, int_type, truncated);
+                    const truncated = try cg.trunc(operand, int_type, union_int_type);
+                    break :result try cg.bitcast(field_ty, int_type, truncated);
                 } else if (field_ty.isPtrAtRuntime(zcu)) {
                     const int_type = try pt.intType(.unsigned, @as(u16, @intCast(field_ty.bitSize(zcu))));
-                    break :result try func.trunc(operand, int_type, union_int_type);
+                    break :result try cg.trunc(operand, int_type, union_int_type);
                 }
-                break :result try func.trunc(operand, field_ty, union_int_type);
+                break :result try cg.trunc(operand, field_ty, union_int_type);
             },
             else => unreachable,
         },
         else => result: {
             const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, zcu)) orelse {
-                return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
+                return cg.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(pt)});
             };
-            if (isByRef(field_ty, pt, func.target)) {
+            if (isByRef(field_ty, pt, cg.target)) {
                 switch (operand) {
                     .stack_offset => |stack_offset| {
                         break :result .{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
                     },
-                    else => break :result try func.buildPointerOffset(operand, offset, .new),
+                    else => break :result try cg.buildPointerOffset(operand, offset, .new),
                 }
             }
-            break :result try func.load(operand, field_ty, offset);
+            break :result try cg.load(operand, field_ty, offset);
         },
     };
 
-    return func.finishAir(inst, result, &.{struct_field.struct_operand});
+    return cg.finishAir(inst, result, &.{struct_field.struct_operand});
 }
 
-fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airSwitchBr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     // result type is always 'noreturn'
     const blocktype = std.wasm.block_empty;
-    const switch_br = func.air.unwrapSwitch(inst);
-    const target = try func.resolveInst(switch_br.operand);
-    const target_ty = func.typeOf(switch_br.operand);
-    const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.cases_len + 1);
-    defer func.gpa.free(liveness.deaths);
+    const switch_br = cg.air.unwrapSwitch(inst);
+    const target = try cg.resolveInst(switch_br.operand);
+    const target_ty = cg.typeOf(switch_br.operand);
+    const liveness = try cg.liveness.getSwitchBr(cg.gpa, inst, switch_br.cases_len + 1);
+    defer cg.gpa.free(liveness.deaths);
 
     // a list that maps each case to its values and body, based on its order inside the list.
     const CaseValue = union(enum) {
@@ -3993,21 +3991,21 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     var case_list = try std.ArrayList(struct {
         values: []const CaseValue,
         body: []const Air.Inst.Index,
-    }).initCapacity(func.gpa, switch_br.cases_len);
+    }).initCapacity(cg.gpa, switch_br.cases_len);
     defer for (case_list.items) |case| {
-        func.gpa.free(case.values);
+        cg.gpa.free(case.values);
     } else case_list.deinit();
 
     var lowest_maybe: ?i32 = null;
     var highest_maybe: ?i32 = null;
     var it = switch_br.iterateCases();
     while (it.next()) |case| {
-        const values = try func.gpa.alloc(CaseValue, case.items.len + case.ranges.len);
-        errdefer func.gpa.free(values);
+        const values = try cg.gpa.alloc(CaseValue, case.items.len + case.ranges.len);
+        errdefer cg.gpa.free(values);
 
         for (case.items, 0..) |ref, i| {
-            const item_val = (try func.air.value(ref, pt)).?;
-            const int_val = func.valueAsI32(item_val);
+            const item_val = (try cg.air.value(ref, pt)).?;
+            const int_val = cg.valueAsI32(item_val);
             if (lowest_maybe == null or int_val < lowest_maybe.?) {
                 lowest_maybe = int_val;
             }
@@ -4018,15 +4016,15 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
 
         for (case.ranges, 0..) |range, i| {
-            const min_val = (try func.air.value(range[0], pt)).?;
-            const int_min_val = func.valueAsI32(min_val);
+            const min_val = (try cg.air.value(range[0], pt)).?;
+            const int_min_val = cg.valueAsI32(min_val);
 
             if (lowest_maybe == null or int_min_val < lowest_maybe.?) {
                 lowest_maybe = int_min_val;
             }
 
-            const max_val = (try func.air.value(range[1], pt)).?;
-            const int_max_val = func.valueAsI32(max_val);
+            const max_val = (try cg.air.value(range[1], pt)).?;
+            const int_max_val = cg.valueAsI32(max_val);
 
             if (highest_maybe == null or int_max_val > highest_maybe.?) {
                 highest_maybe = int_max_val;
@@ -4041,7 +4039,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }
 
         case_list.appendAssumeCapacity(.{ .values = values, .body = case.body });
-        try func.startBlock(.block, blocktype);
+        try cg.startBlock(.block, blocktype);
     }
 
     // When highest and lowest are null, we have no cases and can use a jump table
@@ -4057,7 +4055,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const else_body = it.elseBody();
     const has_else_body = else_body.len != 0;
     if (has_else_body) {
-        try func.startBlock(.block, blocktype);
+        try cg.startBlock(.block, blocktype);
     }
 
     if (!is_sparse) {
@@ -4065,25 +4063,25 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         // The value 'target' represents the index into the table.
         // Each entry in the table holds the label of the branch to jump to.
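         // e.g. for the cases 3, 5 and 7: lowest is 3 and highest is 7, so
         // the table covers the values 3..7 (indices 0..4) plus a default
         // entry; the slots for 4 and 6 point at the else branch (or are
         // unreachable when there is none).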
-        try func.startBlock(.block, blocktype);
-        try func.emitWValue(target);
+        try cg.startBlock(.block, blocktype);
+        try cg.emitWValue(target);
         if (lowest < 0) {
             // since br_table works using indexes, starting from '0', we must
             // ensure all values we put inside are at least 0.
-            try func.addImm32(@bitCast(lowest * -1));
-            try func.addTag(.i32_add);
+            try cg.addImm32(@bitCast(lowest * -1));
+            try cg.addTag(.i32_add);
         } else if (lowest > 0) {
             // make the index start from 0 by subtracting the lowest value
-            try func.addImm32(@bitCast(lowest));
-            try func.addTag(.i32_sub);
+            try cg.addImm32(@bitCast(lowest));
+            try cg.addTag(.i32_sub);
         }
 
         // the range highest - lowest is inclusive, so add '1'; 'has_else_body' accounts for the default branch
         const depth = @as(u32, @intCast(highest - lowest + @intFromBool(has_else_body))) + 1;
         const jump_table: Mir.JumpTable = .{ .length = depth };
-        const table_extra_index = try func.addExtra(jump_table);
-        try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
-        try func.mir_extra.ensureUnusedCapacity(func.gpa, depth);
+        const table_extra_index = try cg.addExtra(jump_table);
+        try cg.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
+        try cg.mir_extra.ensureUnusedCapacity(cg.gpa, depth);
         var value = lowest;
         while (value <= highest) : (value += 1) {
             // idx represents the branch we jump to
@@ -4104,78 +4102,78 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 // by using a jump table for this instead of if-else chains.
                 break :blk if (has_else_body or target_ty.zigTypeTag(zcu) == .error_set) switch_br.cases_len else unreachable;
             };
-            func.mir_extra.appendAssumeCapacity(idx);
+            cg.mir_extra.appendAssumeCapacity(idx);
         } else if (has_else_body) {
-            func.mir_extra.appendAssumeCapacity(switch_br.cases_len); // default branch
+            cg.mir_extra.appendAssumeCapacity(switch_br.cases_len); // default branch
         }
-        try func.endBlock();
+        try cg.endBlock();
     }
 
-    try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @intFromBool(has_else_body));
+    try cg.branches.ensureUnusedCapacity(cg.gpa, case_list.items.len + @intFromBool(has_else_body));
     for (case_list.items, 0..) |case, index| {
         // when sparse, we use an if/else-chain, so emit conditional checks
         if (is_sparse) {
             // for a single-value prong we can emit a simple condition
             if (case.values.len == 1 and case.values[0] == .singular) {
-                const val = try func.lowerConstant(case.values[0].singular.value, target_ty);
+                const val = try cg.lowerConstant(case.values[0].singular.value, target_ty);
                 // not equal, because we want to jump out of this block if it does not match the condition.
-                _ = try func.cmp(target, val, target_ty, .neq);
-                try func.addLabel(.br_if, 0);
+                _ = try cg.cmp(target, val, target_ty, .neq);
+                try cg.addLabel(.br_if, 0);
             } else {
                 // in multi-value prongs we must check whether any prong matches the target value.
-                try func.startBlock(.block, blocktype);
+                try cg.startBlock(.block, blocktype);
                 for (case.values) |value| {
                     switch (value) {
                         .singular => |single_val| {
-                            const val = try func.lowerConstant(single_val.value, target_ty);
-                            _ = try func.cmp(target, val, target_ty, .eq);
+                            const val = try cg.lowerConstant(single_val.value, target_ty);
+                            _ = try cg.cmp(target, val, target_ty, .eq);
                         },
                         .range => |range| {
-                            const min_val = try func.lowerConstant(range.min_value, target_ty);
-                            const max_val = try func.lowerConstant(range.max_value, target_ty);
+                            const min_val = try cg.lowerConstant(range.min_value, target_ty);
+                            const max_val = try cg.lowerConstant(range.max_value, target_ty);
 
-                            const gte = try func.cmp(target, min_val, target_ty, .gte);
-                            const lte = try func.cmp(target, max_val, target_ty, .lte);
-                            _ = try func.binOp(gte, lte, Type.bool, .@"and");
+                            const gte = try cg.cmp(target, min_val, target_ty, .gte);
+                            const lte = try cg.cmp(target, max_val, target_ty, .lte);
+                            _ = try cg.binOp(gte, lte, Type.bool, .@"and");
                         },
                     }
-                    try func.addLabel(.br_if, 0);
+                    try cg.addLabel(.br_if, 0);
                 }
                 // value did not match any of the prong values
-                try func.addLabel(.br, 1);
-                try func.endBlock();
+                try cg.addLabel(.br, 1);
+                try cg.endBlock();
             }
         }
-        func.branches.appendAssumeCapacity(.{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len);
+        cg.branches.appendAssumeCapacity(.{});
+        try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths[index].len);
         defer {
-            var case_branch = func.branches.pop();
-            case_branch.deinit(func.gpa);
+            var case_branch = cg.branches.pop();
+            case_branch.deinit(cg.gpa);
         }
-        try func.genBody(case.body);
-        try func.endBlock();
+        try cg.genBody(case.body);
+        try cg.endBlock();
     }
 
     if (has_else_body) {
-        func.branches.appendAssumeCapacity(.{});
+        cg.branches.appendAssumeCapacity(.{});
         const else_deaths = liveness.deaths.len - 1;
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len);
+        try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.deaths[else_deaths].len);
         defer {
-            var else_branch = func.branches.pop();
-            else_branch.deinit(func.gpa);
+            var else_branch = cg.branches.pop();
+            else_branch.deinit(cg.gpa);
         }
-        try func.genBody(else_body);
-        try func.endBlock();
+        try cg.genBody(else_body);
+        try cg.endBlock();
     }
-    return func.finishAir(inst, .none, &.{});
+    return cg.finishAir(inst, .none, &.{});
 }
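
The dense path above shifts the switch target by `lowest` so that br_table
indices start at 0, then reserves one extra slot for the default label. Below
is a minimal standalone sketch of that index math; the helper name and test
are illustrative, not part of this file:

    const std = @import("std");

    // Hypothetical helper mirroring the normalization: br_table indices must
    // start at 0, so the switch target is shifted by `lowest` before branching.
    fn jumpTableIndex(target: i32, lowest: i32) u32 {
        // `target - lowest` is what the emitted i32.add / i32.sub achieves.
        return @intCast(target - lowest);
    }

    test "jump table index normalization" {
        // prongs covering -2...3, so `lowest` is -2
        try std.testing.expectEqual(@as(u32, 0), jumpTableIndex(-2, -2));
        try std.testing.expectEqual(@as(u32, 5), jumpTableIndex(3, -2));
    }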
 
-fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void {
-    const pt = func.pt;
+fn airIsErr(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const err_union_ty = func.typeOf(un_op);
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const err_union_ty = cg.typeOf(un_op);
     const pl_ty = err_union_ty.errorUnionPayload(zcu);
 
     const result: WValue = result: {
@@ -4187,57 +4185,57 @@ fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode) Inner
             }
         }
 
-        try func.emitWValue(operand);
+        try cg.emitWValue(operand);
         if (pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            try func.addMemArg(.i32_load16_u, .{
+            try cg.addMemArg(.i32_load16_u, .{
                 .offset = operand.offset() + @as(u32, @intCast(errUnionErrorOffset(pl_ty, zcu))),
                 .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
             });
         }
 
         // Compare the error value with '0'
-        try func.addImm32(0);
-        try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+        try cg.addImm32(0);
+        try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
         break :result .stack;
     };
-    return func.finishAir(inst, result, &.{un_op});
+    return cg.finishAir(inst, result, &.{un_op});
 }
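
The comparison emitted above relies on the invariant that the error tag of an
error union is a 16-bit integer and that the value 0 means "no error". A tiny
sketch of that invariant, with illustrative names:

    const std = @import("std");

    // Sketch: airIsErr loads the 16-bit error tag and compares it against 0,
    // with the exact comparison (eq vs ne) chosen by the `opcode` parameter.
    fn isErr(error_tag: u16) bool {
        return error_tag != 0;
    }

    test "error tag zero means no error" {
        try std.testing.expect(!isErr(0));
        try std.testing.expect(isErr(1));
    }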
 
-fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
-    const pt = func.pt;
+fn airUnwrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.typeOf(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const op_ty = cg.typeOf(ty_op.operand);
     const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
     const payload_ty = err_ty.errorUnionPayload(zcu);
 
     const result: WValue = result: {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             if (op_is_ptr) {
-                break :result func.reuseOperand(ty_op.operand, operand);
+                break :result cg.reuseOperand(ty_op.operand, operand);
             }
             break :result .none;
         }
 
         const pl_offset = @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu)));
-        if (op_is_ptr or isByRef(payload_ty, pt, func.target)) {
-            break :result try func.buildPointerOffset(operand, pl_offset, .new);
+        if (op_is_ptr or isByRef(payload_ty, pt, cg.target)) {
+            break :result try cg.buildPointerOffset(operand, pl_offset, .new);
         }
 
-        break :result try func.load(operand, payload_ty, pl_offset);
+        break :result try cg.load(operand, payload_ty, pl_offset);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
-    const pt = func.pt;
+fn airUnwrapErrUnionError(cg: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.typeOf(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const op_ty = cg.typeOf(ty_op.operand);
     const err_ty = if (op_is_ptr) op_ty.childType(zcu) else op_ty;
     const payload_ty = err_ty.errorUnionPayload(zcu);
 
@@ -4247,103 +4245,103 @@ fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool)
         }
 
         if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu)));
+        break :result try cg.load(operand, Type.anyerror, @intCast(errUnionErrorOffset(payload_ty, zcu)));
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const zcu = func.pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airWrapErrUnionPayload(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const zcu = cg.pt.zcu;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const err_ty = func.typeOfIndex(inst);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const err_ty = cg.typeOfIndex(inst);
 
-    const pl_ty = func.typeOf(ty_op.operand);
+    const pl_ty = cg.typeOf(ty_op.operand);
     const result = result: {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
 
-        const err_union = try func.allocStack(err_ty);
-        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
-        try func.store(payload_ptr, operand, pl_ty, 0);
+        const err_union = try cg.allocStack(err_ty);
+        const payload_ptr = try cg.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
+        try cg.store(payload_ptr, operand, pl_ty, 0);
 
         // ensure we also write '0' to the error part, so any existing stack value gets overwritten.
-        try func.emitWValue(err_union);
-        try func.addImm32(0);
+        try cg.emitWValue(err_union);
+        try cg.addImm32(0);
         const err_val_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
-        try func.addMemArg(.i32_store16, .{
+        try cg.addMemArg(.i32_store16, .{
             .offset = err_union.offset() + err_val_offset,
             .alignment = 2,
         });
         break :result err_union;
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airWrapErrUnionErr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     const err_ty = ty_op.ty.toType();
     const pl_ty = err_ty.errorUnionPayload(zcu);
 
     const result = result: {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
 
-        const err_union = try func.allocStack(err_ty);
+        const err_union = try cg.allocStack(err_ty);
         // store error value
-        try func.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu)));
+        try cg.store(err_union, operand, Type.anyerror, @intCast(errUnionErrorOffset(pl_ty, zcu)));
 
         // write 'undefined' to the payload
-        const payload_ptr = try func.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
+        const payload_ptr = try cg.buildPointerOffset(err_union, @as(u32, @intCast(errUnionPayloadOffset(pl_ty, zcu))), .new);
         const len = @as(u32, @intCast(err_ty.errorUnionPayload(zcu).abiSize(zcu)));
-        try func.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
+        try cg.memset(Type.u8, payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaa });
 
         break :result err_union;
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airIntcast(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
     const ty = ty_op.ty.toType();
-    const operand = try func.resolveInst(ty_op.operand);
-    const operand_ty = func.typeOf(ty_op.operand);
-    const pt = func.pt;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const operand_ty = cg.typeOf(ty_op.operand);
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (ty.zigTypeTag(zcu) == .vector or operand_ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("todo Wasm intcast for vectors", .{});
+        return cg.fail("todo Wasm intcast for vectors", .{});
     }
     if (ty.abiSize(zcu) > 16 or operand_ty.abiSize(zcu) > 16) {
-        return func.fail("todo Wasm intcast for bitsize > 128", .{});
+        return cg.fail("todo Wasm intcast for bitsize > 128", .{});
     }
 
     const op_bits = toWasmBits(@intCast(operand_ty.bitSize(zcu))).?;
     const wanted_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
     const result = if (op_bits == wanted_bits)
-        func.reuseOperand(ty_op.operand, operand)
+        cg.reuseOperand(ty_op.operand, operand)
     else
-        try func.intcast(operand, operand_ty, ty);
+        try cg.intcast(operand, operand_ty, ty);
 
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Upcasts or downcasts an integer based on the given and wanted types,
 /// and stores the result in a new operand.
 /// Asserts the type's bitsize is <= 128.
 /// NOTE: May leave the result on top of the stack.
-fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const pt = func.pt;
+fn intcast(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const given_bitsize = @as(u16, @intCast(given.bitSize(zcu)));
     const wanted_bitsize = @as(u16, @intCast(wanted.bitSize(zcu)));
@@ -4357,469 +4355,469 @@ fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
     }
 
     if (op_bits == 64 and wanted_bits == 32) {
-        try func.emitWValue(operand);
-        try func.addTag(.i32_wrap_i64);
+        try cg.emitWValue(operand);
+        try cg.addTag(.i32_wrap_i64);
         return .stack;
     } else if (op_bits == 32 and wanted_bits == 64) {
-        try func.emitWValue(operand);
-        try func.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u);
+        try cg.emitWValue(operand);
+        try cg.addTag(if (wanted.isSignedInt(zcu)) .i64_extend_i32_s else .i64_extend_i32_u);
         return .stack;
     } else if (wanted_bits == 128) {
         // for 128-bit integers we store the integer in the virtual stack, rather than a local
-        const stack_ptr = try func.allocStack(wanted);
-        try func.emitWValue(stack_ptr);
+        const stack_ptr = try cg.allocStack(wanted);
+        try cg.emitWValue(stack_ptr);
 
         // for 32-bit integers, we first coerce the value into a 64-bit integer before storing it,
         // meaning fewer store operations are required.
         const lhs = if (op_bits == 32) blk: {
             const sign_ty = if (wanted.isSignedInt(zcu)) Type.i64 else Type.u64;
-            break :blk try (try func.intcast(operand, given, sign_ty)).toLocal(func, sign_ty);
+            break :blk try (try cg.intcast(operand, given, sign_ty)).toLocal(cg, sign_ty);
         } else operand;
 
         // store lsb first
-        try func.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset());
+        try cg.store(.stack, lhs, Type.u64, 0 + stack_ptr.offset());
 
         // For signed integers we shift the lsb by 63 (64-bit integer - 1 sign bit) and store the remaining value
         if (wanted.isSignedInt(zcu)) {
-            try func.emitWValue(stack_ptr);
-            const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
-            try func.store(.stack, shr, Type.u64, 8 + stack_ptr.offset());
+            try cg.emitWValue(stack_ptr);
+            const shr = try cg.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
+            try cg.store(.stack, shr, Type.u64, 8 + stack_ptr.offset());
         } else {
             // Ensure the memory of the msb is zeroed
-            try func.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
+            try cg.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
         }
         return stack_ptr;
-    } else return func.load(operand, wanted, 0);
+    } else return cg.load(operand, wanted, 0);
 }
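
The 128-bit branch above stores the low word first, then derives the high
word: for signed integers an arithmetic shift of the low word by 63 replicates
the sign bit; for unsigned integers the high word is simply zeroed. A
standalone sketch of that arithmetic (the helper is illustrative):

    const std = @import("std");

    // Sketch of the widening step: the second word of the 128-bit result is
    // either a sign-replicated copy of the first (i64.shr_s by 63) or zero.
    fn extendTo128(lsb: u64, signed: bool) [2]u64 {
        const msb: u64 = if (signed)
            @bitCast(@as(i64, @bitCast(lsb)) >> 63)
        else
            0;
        return .{ lsb, msb };
    }

    test "sign extension to 128 bits" {
        const neg = extendTo128(@bitCast(@as(i64, -1)), true);
        try std.testing.expectEqual(@as(u64, std.math.maxInt(u64)), neg[1]);
        const pos = extendTo128(42, true);
        try std.testing.expectEqual(@as(u64, 0), pos[1]);
    }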
 
-fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
-    const pt = func.pt;
+fn airIsNull(cg: *CodeGen, inst: Air.Inst.Index, opcode: std.wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
 
-    const op_ty = func.typeOf(un_op);
+    const op_ty = cg.typeOf(un_op);
     const optional_ty = if (op_kind == .ptr) op_ty.childType(zcu) else op_ty;
-    const result = try func.isNull(operand, optional_ty, opcode);
-    return func.finishAir(inst, result, &.{un_op});
+    const result = try cg.isNull(operand, optional_ty, opcode);
+    return cg.finishAir(inst, result, &.{un_op});
 }
 
 /// For a given type and operand, checks if it's considered `null`.
 /// NOTE: Leaves the result on the stack
-fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opcode) InnerError!WValue {
-    const pt = func.pt;
+fn isNull(cg: *CodeGen, operand: WValue, optional_ty: Type, opcode: std.wasm.Opcode) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    try func.emitWValue(operand);
+    try cg.emitWValue(operand);
     const payload_ty = optional_ty.optionalChild(zcu);
     if (!optional_ty.optionalReprIsPayload(zcu)) {
         // When the payload is zero-bits, we can treat the operand as a value rather
         // than a pointer to the stack value.
         if (payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
             const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
-                return func.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
+                return cg.fail("Optional type {} too big to fit into stack frame", .{optional_ty.fmt(pt)});
             };
-            try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
+            try cg.addMemArg(.i32_load8_u, .{ .offset = operand.offset() + offset, .alignment = 1 });
         }
     } else if (payload_ty.isSlice(zcu)) {
-        switch (func.ptr_size) {
-            .wasm32 => try func.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
-            .wasm64 => try func.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
+        switch (cg.ptr_size) {
+            .wasm32 => try cg.addMemArg(.i32_load, .{ .offset = operand.offset(), .alignment = 4 }),
+            .wasm64 => try cg.addMemArg(.i64_load, .{ .offset = operand.offset(), .alignment = 8 }),
         }
     }
 
     // Compare the null value with '0'
-    try func.addImm32(0);
-    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try cg.addImm32(0);
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return .stack;
 }
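
The i32.load8_u above assumes the layout used for optionals that are not
pointer-like: a one-byte non-null flag placed directly after the payload, at
the payload's ABI size. A sketch of that layout assumption, with illustrative
names and sizes:

    const std = @import("std");

    // Sketch: the non-null flag is the byte at offset `payload_abi_size`;
    // a value of 0 means the optional is null.
    fn isNonNull(bytes: []const u8, payload_abi_size: u32) bool {
        return bytes[payload_abi_size] != 0;
    }

    test "non-null flag sits after the payload" {
        // 4-byte payload (e.g. a u32) followed by the flag byte.
        const some = [_]u8{ 0x2a, 0, 0, 0, 1 };
        const none = [_]u8{ 0, 0, 0, 0, 0 };
        try std.testing.expect(isNonNull(&some, 4));
        try std.testing.expect(!isNonNull(&none, 4));
    }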
 
-fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airOptionalPayload(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const opt_ty = func.typeOf(ty_op.operand);
-    const payload_ty = func.typeOfIndex(inst);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const opt_ty = cg.typeOf(ty_op.operand);
+    const payload_ty = cg.typeOfIndex(inst);
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-        return func.finishAir(inst, .none, &.{ty_op.operand});
+        return cg.finishAir(inst, .none, &.{ty_op.operand});
     }
 
     const result = result: {
-        const operand = try func.resolveInst(ty_op.operand);
-        if (opt_ty.optionalReprIsPayload(zcu)) break :result func.reuseOperand(ty_op.operand, operand);
+        const operand = try cg.resolveInst(ty_op.operand);
+        if (opt_ty.optionalReprIsPayload(zcu)) break :result cg.reuseOperand(ty_op.operand, operand);
 
-        if (isByRef(payload_ty, pt, func.target)) {
-            break :result try func.buildPointerOffset(operand, 0, .new);
+        if (isByRef(payload_ty, pt, cg.target)) {
+            break :result try cg.buildPointerOffset(operand, 0, .new);
         }
 
-        break :result try func.load(operand, payload_ty, 0);
+        break :result try cg.load(operand, payload_ty, 0);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airOptionalPayloadPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const opt_ty = cg.typeOf(ty_op.operand).childType(zcu);
 
     const result = result: {
         const payload_ty = opt_ty.optionalChild(zcu);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu) or opt_ty.optionalReprIsPayload(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.buildPointerOffset(operand, 0, .new);
+        break :result try cg.buildPointerOffset(operand, 0, .new);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airOptionalPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
-    const opt_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const opt_ty = cg.typeOf(ty_op.operand).childType(zcu);
     const payload_ty = opt_ty.optionalChild(zcu);
     if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-        return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
+        return cg.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
     }
 
     if (opt_ty.optionalReprIsPayload(zcu)) {
-        return func.finishAir(inst, operand, &.{ty_op.operand});
+        return cg.finishAir(inst, operand, &.{ty_op.operand});
     }
 
     const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
-        return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
+        return cg.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(pt)});
     };
 
-    try func.emitWValue(operand);
-    try func.addImm32(1);
-    try func.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 });
+    try cg.emitWValue(operand);
+    try cg.addImm32(1);
+    try cg.addMemArg(.i32_store8, .{ .offset = operand.offset() + offset, .alignment = 1 });
 
-    const result = try func.buildPointerOffset(operand, 0, .new);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    const result = try cg.buildPointerOffset(operand, 0, .new);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const payload_ty = func.typeOf(ty_op.operand);
-    const pt = func.pt;
+fn airWrapOptional(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const payload_ty = cg.typeOf(ty_op.operand);
+    const pt = cg.pt;
     const zcu = pt.zcu;
 
     const result = result: {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            const non_null_bit = try func.allocStack(Type.u1);
-            try func.emitWValue(non_null_bit);
-            try func.addImm32(1);
-            try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
+            const non_null_bit = try cg.allocStack(Type.u1);
+            try cg.emitWValue(non_null_bit);
+            try cg.addImm32(1);
+            try cg.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
             break :result non_null_bit;
         }
 
-        const operand = try func.resolveInst(ty_op.operand);
-        const op_ty = func.typeOfIndex(inst);
+        const operand = try cg.resolveInst(ty_op.operand);
+        const op_ty = cg.typeOfIndex(inst);
         if (op_ty.optionalReprIsPayload(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
         const offset = std.math.cast(u32, payload_ty.abiSize(zcu)) orelse {
-            return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
+            return cg.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(pt)});
         };
 
         // Create optional type, set the non-null bit, and store the operand inside the optional type
-        const result_ptr = try func.allocStack(op_ty);
-        try func.emitWValue(result_ptr);
-        try func.addImm32(1);
-        try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 });
+        const result_ptr = try cg.allocStack(op_ty);
+        try cg.emitWValue(result_ptr);
+        try cg.addImm32(1);
+        try cg.addMemArg(.i32_store8, .{ .offset = result_ptr.offset() + offset, .alignment = 1 });
 
-        const payload_ptr = try func.buildPointerOffset(result_ptr, 0, .new);
-        try func.store(payload_ptr, operand, payload_ty, 0);
+        const payload_ptr = try cg.buildPointerOffset(result_ptr, 0, .new);
+        try cg.store(payload_ptr, operand, payload_ty, 0);
         break :result result_ptr;
     };
 
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
-    const slice_ty = func.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
+    const slice_ty = cg.typeOfIndex(inst);
 
-    const slice = try func.allocStack(slice_ty);
-    try func.store(slice, lhs, Type.usize, 0);
-    try func.store(slice, rhs, Type.usize, func.ptrSize());
+    const slice = try cg.allocStack(slice_ty);
+    try cg.store(slice, lhs, Type.usize, 0);
+    try cg.store(slice, rhs, Type.usize, cg.ptrSize());
 
-    return func.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs });
 }
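
airSlice fills a fresh stack slot with the two words of a slice: the pointer
at offset 0 and the length directly after it, at ptrSize(). A sketch of that
layout on wasm32; the struct is an illustrative stand-in, not a compiler type:

    const std = @import("std");

    // Sketch: on wasm32 a slice occupies two 4-byte words, pointer then length.
    const Wasm32Slice = extern struct {
        ptr: u32, // stored at offset 0
        len: u32, // stored at offset ptrSize() == 4
    };

    test "slice field offsets on wasm32" {
        try std.testing.expectEqual(@as(usize, 0), @offsetOf(Wasm32Slice, "ptr"));
        try std.testing.expectEqual(@as(usize, 4), @offsetOf(Wasm32Slice, "len"));
    }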
 
-fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airSliceLen(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    return func.finishAir(inst, try func.sliceLen(operand), &.{ty_op.operand});
+    const operand = try cg.resolveInst(ty_op.operand);
+    return cg.finishAir(inst, try cg.sliceLen(operand), &.{ty_op.operand});
 }
 
-fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airSliceElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const slice_ty = func.typeOf(bin_op.lhs);
-    const slice = try func.resolveInst(bin_op.lhs);
-    const index = try func.resolveInst(bin_op.rhs);
+    const slice_ty = cg.typeOf(bin_op.lhs);
+    const slice = try cg.resolveInst(bin_op.lhs);
+    const index = try cg.resolveInst(bin_op.rhs);
     const elem_ty = slice_ty.childType(zcu);
     const elem_size = elem_ty.abiSize(zcu);
 
     // load pointer onto stack
-    _ = try func.load(slice, Type.usize, 0);
+    _ = try cg.load(slice, Type.usize, 0);
 
     // calculate index into slice
-    try func.emitWValue(index);
-    try func.addImm32(@intCast(elem_size));
-    try func.addTag(.i32_mul);
-    try func.addTag(.i32_add);
+    try cg.emitWValue(index);
+    try cg.addImm32(@intCast(elem_size));
+    try cg.addTag(.i32_mul);
+    try cg.addTag(.i32_add);
 
-    const elem_result = if (isByRef(elem_ty, pt, func.target))
+    const elem_result = if (isByRef(elem_ty, pt, cg.target))
         .stack
     else
-        try func.load(.stack, elem_ty, 0);
+        try cg.load(.stack, elem_ty, 0);
 
-    return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
 }
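
The mul/add pair emitted above computes the element address as
base + index * elem_size. A tiny standalone sketch of that address math
(names are illustrative):

    const std = @import("std");

    // Sketch of the emitted i32.mul / i32.add: the element address is the
    // slice's base pointer plus the index scaled by the element's ABI size.
    fn elemAddr(base: u32, index: u32, elem_size: u32) u32 {
        return base + index * elem_size;
    }

    test "element address math" {
        // third element of a slice of 4-byte elements starting at 0x1000
        try std.testing.expectEqual(@as(u32, 0x1008), elemAddr(0x1000, 2, 4));
    }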
 
-fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airSliceElemPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
     const elem_ty = ty_pl.ty.toType().childType(zcu);
     const elem_size = elem_ty.abiSize(zcu);
 
-    const slice = try func.resolveInst(bin_op.lhs);
-    const index = try func.resolveInst(bin_op.rhs);
+    const slice = try cg.resolveInst(bin_op.lhs);
+    const index = try cg.resolveInst(bin_op.rhs);
 
-    _ = try func.load(slice, Type.usize, 0);
+    _ = try cg.load(slice, Type.usize, 0);
 
     // calculate index into slice
-    try func.emitWValue(index);
-    try func.addImm32(@intCast(elem_size));
-    try func.addTag(.i32_mul);
-    try func.addTag(.i32_add);
+    try cg.emitWValue(index);
+    try cg.addImm32(@intCast(elem_size));
+    try cg.addTag(.i32_mul);
+    try cg.addTag(.i32_add);
 
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
-    return func.finishAir(inst, try func.slicePtr(operand), &.{ty_op.operand});
+fn airSlicePtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
+    return cg.finishAir(inst, try cg.slicePtr(operand), &.{ty_op.operand});
 }
 
-fn slicePtr(func: *CodeGen, operand: WValue) InnerError!WValue {
-    const ptr = try func.load(operand, Type.usize, 0);
-    return ptr.toLocal(func, Type.usize);
+fn slicePtr(cg: *CodeGen, operand: WValue) InnerError!WValue {
+    const ptr = try cg.load(operand, Type.usize, 0);
+    return ptr.toLocal(cg, Type.usize);
 }
 
-fn sliceLen(func: *CodeGen, operand: WValue) InnerError!WValue {
-    const len = try func.load(operand, Type.usize, func.ptrSize());
-    return len.toLocal(func, Type.usize);
+fn sliceLen(cg: *CodeGen, operand: WValue) InnerError!WValue {
+    const len = try cg.load(operand, Type.usize, cg.ptrSize());
+    return len.toLocal(cg, Type.usize);
 }
 
-fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airTrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     const wanted_ty: Type = ty_op.ty.toType();
-    const op_ty = func.typeOf(ty_op.operand);
-    const pt = func.pt;
+    const op_ty = cg.typeOf(ty_op.operand);
+    const pt = cg.pt;
     const zcu = pt.zcu;
 
     if (wanted_ty.zigTypeTag(zcu) == .vector or op_ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: trunc for vectors", .{});
+        return cg.fail("TODO: trunc for vectors", .{});
     }
 
     const result = if (op_ty.bitSize(zcu) == wanted_ty.bitSize(zcu))
-        func.reuseOperand(ty_op.operand, operand)
+        cg.reuseOperand(ty_op.operand, operand)
     else
-        try func.trunc(operand, wanted_ty, op_ty);
+        try cg.trunc(operand, wanted_ty, op_ty);
 
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Truncates a given operand to a given type, discarding any overflowing bits.
 /// NOTE: Resulting value is left on the stack.
-fn trunc(func: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
-    const pt = func.pt;
+fn trunc(cg: *CodeGen, operand: WValue, wanted_ty: Type, given_ty: Type) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const given_bits = @as(u16, @intCast(given_ty.bitSize(zcu)));
     if (toWasmBits(given_bits) == null) {
-        return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
+        return cg.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{given_bits});
     }
 
-    var result = try func.intcast(operand, given_ty, wanted_ty);
+    var result = try cg.intcast(operand, given_ty, wanted_ty);
     const wanted_bits = @as(u16, @intCast(wanted_ty.bitSize(zcu)));
     const wasm_bits = toWasmBits(wanted_bits).?;
     if (wasm_bits != wanted_bits) {
-        result = try func.wrapOperand(result, wanted_ty);
+        result = try cg.wrapOperand(result, wanted_ty);
     }
     return result;
 }
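
When the wanted bit width is narrower than the wasm register width, the
wrapOperand call above masks off the excess bits. A sketch of that masking,
assuming a 32-bit register; the helper is illustrative:

    const std = @import("std");

    // Sketch: keep only the low `wanted_bits` bits of the register value.
    fn wrapTo(value: u32, wanted_bits: u5) u32 {
        const mask = (@as(u32, 1) << wanted_bits) - 1;
        return value & mask;
    }

    test "truncation discards overflowing bits" {
        // truncating 0x1FF to 5 bits keeps only 0x1F
        try std.testing.expectEqual(@as(u32, 0x1F), wrapTo(0x1FF, 5));
    }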
 
-fn airIntFromBool(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const result = func.reuseOperand(un_op, operand);
+fn airIntFromBool(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const result = cg.reuseOperand(un_op, operand);
 
-    return func.finishAir(inst, result, &.{un_op});
+    return cg.finishAir(inst, result, &.{un_op});
 }
 
-fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airArrayToSlice(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const array_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const array_ty = cg.typeOf(ty_op.operand).childType(zcu);
     const slice_ty = ty_op.ty.toType();
 
     // create a slice on the stack
-    const slice_local = try func.allocStack(slice_ty);
+    const slice_local = try cg.allocStack(slice_ty);
 
     // store the array ptr in the slice
     if (array_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-        try func.store(slice_local, operand, Type.usize, 0);
+        try cg.store(slice_local, operand, Type.usize, 0);
     }
 
     // store the length of the array in the slice
     const array_len: u32 = @intCast(array_ty.arrayLen(zcu));
-    try func.store(slice_local, .{ .imm32 = array_len }, Type.usize, func.ptrSize());
+    try cg.store(slice_local, .{ .imm32 = array_len }, Type.usize, cg.ptrSize());
 
-    return func.finishAir(inst, slice_local, &.{ty_op.operand});
+    return cg.finishAir(inst, slice_local, &.{ty_op.operand});
 }
 
-fn airIntFromPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airIntFromPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const ptr_ty = func.typeOf(un_op);
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const ptr_ty = cg.typeOf(un_op);
     const result = if (ptr_ty.isSlice(zcu))
-        try func.slicePtr(operand)
+        try cg.slicePtr(operand)
     else switch (operand) {
         // for stack offset, return a pointer to this offset.
-        .stack_offset => try func.buildPointerOffset(operand, 0, .new),
-        else => func.reuseOperand(un_op, operand),
+        .stack_offset => try cg.buildPointerOffset(operand, 0, .new),
+        else => cg.reuseOperand(un_op, operand),
     };
-    return func.finishAir(inst, result, &.{un_op});
+    return cg.finishAir(inst, result, &.{un_op});
 }
 
-fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ptr_ty = func.typeOf(bin_op.lhs);
-    const ptr = try func.resolveInst(bin_op.lhs);
-    const index = try func.resolveInst(bin_op.rhs);
+    const ptr_ty = cg.typeOf(bin_op.lhs);
+    const ptr = try cg.resolveInst(bin_op.lhs);
+    const index = try cg.resolveInst(bin_op.rhs);
     const elem_ty = ptr_ty.childType(zcu);
     const elem_size = elem_ty.abiSize(zcu);
 
     // load pointer onto the stack
     if (ptr_ty.isSlice(zcu)) {
-        _ = try func.load(ptr, Type.usize, 0);
+        _ = try cg.load(ptr, Type.usize, 0);
     } else {
-        try func.lowerToStack(ptr);
+        try cg.lowerToStack(ptr);
     }
 
     // calculate index into slice
-    try func.emitWValue(index);
-    try func.addImm32(@intCast(elem_size));
-    try func.addTag(.i32_mul);
-    try func.addTag(.i32_add);
+    try cg.emitWValue(index);
+    try cg.addImm32(@intCast(elem_size));
+    try cg.addTag(.i32_mul);
+    try cg.addTag(.i32_add);
 
-    const elem_result = if (isByRef(elem_ty, pt, func.target))
+    const elem_result = if (isByRef(elem_ty, pt, cg.target))
         .stack
     else
-        try func.load(.stack, elem_ty, 0);
+        try cg.load(.stack, elem_ty, 0);
 
-    return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airPtrElemPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const ptr_ty = func.typeOf(bin_op.lhs);
+    const ptr_ty = cg.typeOf(bin_op.lhs);
     const elem_ty = ty_pl.ty.toType().childType(zcu);
     const elem_size = elem_ty.abiSize(zcu);
 
-    const ptr = try func.resolveInst(bin_op.lhs);
-    const index = try func.resolveInst(bin_op.rhs);
+    const ptr = try cg.resolveInst(bin_op.lhs);
+    const index = try cg.resolveInst(bin_op.rhs);
 
     // load pointer onto the stack
     if (ptr_ty.isSlice(zcu)) {
-        _ = try func.load(ptr, Type.usize, 0);
+        _ = try cg.load(ptr, Type.usize, 0);
     } else {
-        try func.lowerToStack(ptr);
+        try cg.lowerToStack(ptr);
     }
 
     // calculate index into ptr
-    try func.emitWValue(index);
-    try func.addImm32(@intCast(elem_size));
-    try func.addTag(.i32_mul);
-    try func.addTag(.i32_add);
+    try cg.emitWValue(index);
+    try cg.addImm32(@intCast(elem_size));
+    try cg.addTag(.i32_mul);
+    try cg.addTag(.i32_add);
 
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const pt = func.pt;
+fn airPtrBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const bin_op = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const ptr = try func.resolveInst(bin_op.lhs);
-    const offset = try func.resolveInst(bin_op.rhs);
-    const ptr_ty = func.typeOf(bin_op.lhs);
+    const ptr = try cg.resolveInst(bin_op.lhs);
+    const offset = try cg.resolveInst(bin_op.rhs);
+    const ptr_ty = cg.typeOf(bin_op.lhs);
     const pointee_ty = switch (ptr_ty.ptrSize(zcu)) {
         .One => ptr_ty.childType(zcu).childType(zcu), // ptr to array, so get array element type
         else => ptr_ty.childType(zcu),
     };
 
-    const valtype = typeToValtype(Type.usize, pt, func.target);
+    const valtype = typeToValtype(Type.usize, pt, cg.target);
     const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
     const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
 
-    try func.lowerToStack(ptr);
-    try func.emitWValue(offset);
-    try func.addImm32(@intCast(pointee_ty.abiSize(zcu)));
-    try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
-    try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
+    try cg.lowerToStack(ptr);
+    try cg.emitWValue(offset);
+    try cg.addImm32(@intCast(pointee_ty.abiSize(zcu)));
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
 
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
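
airPtrBinOp scales the offset by the pointee's ABI size before applying the
pointer add or subtract. A standalone sketch of that computation; the enum
and helper are illustrative:

    const std = @import("std");

    // Sketch of the emitted mul followed by add/sub on the pointer value.
    fn ptrBinOp(ptr: u32, offset: u32, pointee_size: u32, op: enum { add, sub }) u32 {
        const byte_offset = offset * pointee_size; // the emitted mul
        return switch (op) {
            .add => ptr + byte_offset,
            .sub => ptr - byte_offset,
        };
    }

    test "scaled pointer arithmetic" {
        try std.testing.expectEqual(@as(u32, 0x1010), ptrBinOp(0x1000, 2, 8, .add));
        try std.testing.expectEqual(@as(u32, 0x0FF0), ptrBinOp(0x1000, 2, 8, .sub));
    }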
 
-fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
-    const pt = func.pt;
+fn airMemset(cg: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (safety) {
         // TODO if the value is undef, write 0xaa bytes to dest
     } else {
         // TODO if the value is undef, don't lower this instruction
     }
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ptr = try func.resolveInst(bin_op.lhs);
-    const ptr_ty = func.typeOf(bin_op.lhs);
-    const value = try func.resolveInst(bin_op.rhs);
+    const ptr = try cg.resolveInst(bin_op.lhs);
+    const ptr_ty = cg.typeOf(bin_op.lhs);
+    const value = try cg.resolveInst(bin_op.rhs);
     const len = switch (ptr_ty.ptrSize(zcu)) {
-        .Slice => try func.sliceLen(ptr),
+        .Slice => try cg.sliceLen(ptr),
         .One => @as(WValue, .{ .imm32 = @as(u32, @intCast(ptr_ty.childType(zcu).arrayLen(zcu))) }),
         .C, .Many => unreachable,
     };
@@ -4829,27 +4827,27 @@ fn airMemset(func: *CodeGen, inst: Air.Inst.Index, safety: bool) InnerError!void
     else
         ptr_ty.childType(zcu);
 
-    const dst_ptr = try func.sliceOrArrayPtr(ptr, ptr_ty);
-    try func.memset(elem_ty, dst_ptr, len, value);
+    const dst_ptr = try cg.sliceOrArrayPtr(ptr, ptr_ty);
+    try cg.memset(elem_ty, dst_ptr, len, value);
 
-    return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Sets a region of memory at `ptr` to the value of `value`.
 /// When the user has enabled the bulk_memory feature, we lower
 /// this to wasm's memory.fill instruction. When the feature is not present,
 /// we implement it manually.
-fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
-    const pt = func.pt;
+fn memset(cg: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue) InnerError!void {
+    const pt = cg.pt;
     const abi_size = @as(u32, @intCast(elem_ty.abiSize(pt.zcu)));
 
     // When bulk_memory is enabled, we lower it to wasm's memory.fill instruction.
     // If not, we lower it ourselves.
-    if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory) and abi_size == 1) {
-        try func.lowerToStack(ptr);
-        try func.emitWValue(value);
-        try func.emitWValue(len);
-        try func.addExtended(.memory_fill);
+    if (std.Target.wasm.featureSetHas(cg.target.cpu.features, .bulk_memory) and abi_size == 1) {
+        try cg.lowerToStack(ptr);
+        try cg.emitWValue(value);
+        try cg.emitWValue(len);
+        try cg.addExtended(.memory_fill);
         return;
     }
 
@@ -4857,90 +4855,90 @@ fn memset(func: *CodeGen, elem_ty: Type, ptr: WValue, len: WValue, value: WValue
         .imm32 => |val| .{ .imm32 = val * abi_size },
         .imm64 => |val| .{ .imm64 = val * abi_size },
         else => if (abi_size != 1) blk: {
-            const new_len = try func.ensureAllocLocal(Type.usize);
-            try func.emitWValue(len);
-            switch (func.ptr_size) {
+            const new_len = try cg.ensureAllocLocal(Type.usize);
+            try cg.emitWValue(len);
+            switch (cg.ptr_size) {
                 .wasm32 => {
-                    try func.emitWValue(.{ .imm32 = abi_size });
-                    try func.addTag(.i32_mul);
+                    try cg.emitWValue(.{ .imm32 = abi_size });
+                    try cg.addTag(.i32_mul);
                 },
                 .wasm64 => {
-                    try func.emitWValue(.{ .imm64 = abi_size });
-                    try func.addTag(.i64_mul);
+                    try cg.emitWValue(.{ .imm64 = abi_size });
+                    try cg.addTag(.i64_mul);
                 },
             }
-            try func.addLabel(.local_set, new_len.local.value);
+            try cg.addLabel(.local_set, new_len.local.value);
             break :blk new_len;
         } else len,
     };
 
-    var end_ptr = try func.allocLocal(Type.usize);
-    defer end_ptr.free(func);
-    var new_ptr = try func.buildPointerOffset(ptr, 0, .new);
-    defer new_ptr.free(func);
+    var end_ptr = try cg.allocLocal(Type.usize);
+    defer end_ptr.free(cg);
+    var new_ptr = try cg.buildPointerOffset(ptr, 0, .new);
+    defer new_ptr.free(cg);
 
     // get the loop condition: whether the current pointer address equals the final pointer's address
-    try func.lowerToStack(ptr);
-    try func.emitWValue(final_len);
-    switch (func.ptr_size) {
-        .wasm32 => try func.addTag(.i32_add),
-        .wasm64 => try func.addTag(.i64_add),
+    try cg.lowerToStack(ptr);
+    try cg.emitWValue(final_len);
+    switch (cg.ptr_size) {
+        .wasm32 => try cg.addTag(.i32_add),
+        .wasm64 => try cg.addTag(.i64_add),
     }
-    try func.addLabel(.local_set, end_ptr.local.value);
+    try cg.addLabel(.local_set, end_ptr.local.value);
 
     // outer block to jump to when loop is done
-    try func.startBlock(.block, std.wasm.block_empty);
-    try func.startBlock(.loop, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.loop, std.wasm.block_empty);
 
     // check the loop-end condition
-    try func.emitWValue(new_ptr);
-    try func.emitWValue(end_ptr);
-    switch (func.ptr_size) {
-        .wasm32 => try func.addTag(.i32_eq),
-        .wasm64 => try func.addTag(.i64_eq),
+    try cg.emitWValue(new_ptr);
+    try cg.emitWValue(end_ptr);
+    switch (cg.ptr_size) {
+        .wasm32 => try cg.addTag(.i32_eq),
+        .wasm64 => try cg.addTag(.i64_eq),
     }
-    try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+    try cg.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
 
     // store the value at the current position of the pointer
-    try func.store(new_ptr, value, elem_ty, 0);
+    try cg.store(new_ptr, value, elem_ty, 0);
 
     // move the pointer to the next element
-    try func.emitWValue(new_ptr);
-    switch (func.ptr_size) {
+    try cg.emitWValue(new_ptr);
+    switch (cg.ptr_size) {
         .wasm32 => {
-            try func.emitWValue(.{ .imm32 = abi_size });
-            try func.addTag(.i32_add);
+            try cg.emitWValue(.{ .imm32 = abi_size });
+            try cg.addTag(.i32_add);
         },
         .wasm64 => {
-            try func.emitWValue(.{ .imm64 = abi_size });
-            try func.addTag(.i64_add);
+            try cg.emitWValue(.{ .imm64 = abi_size });
+            try cg.addTag(.i64_add);
         },
     }
-    try func.addLabel(.local_set, new_ptr.local.value);
+    try cg.addLabel(.local_set, new_ptr.local.value);
 
     // end of loop
-    try func.addLabel(.br, 0); // jump to start of loop
-    try func.endBlock();
-    try func.endBlock();
+    try cg.addLabel(.br, 0); // jump to start of loop
+    try cg.endBlock();
+    try cg.endBlock();
 }
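
Without bulk_memory, the fallback above compares a moving pointer against a
precomputed end address, storing the value and bumping the pointer by the
element's ABI size each iteration. A behavioral sketch for the byte-sized
case; this models the semantics, not the emitted wasm:

    const std = @import("std");

    // Sketch: loop until the current index reaches the end, as the emitted
    // block/loop with br_if does, writing `value` at each step.
    fn memsetBytes(dest: []u8, value: u8) void {
        var ptr: usize = 0;
        const end = dest.len; // end address computed before the loop
        while (ptr != end) : (ptr += 1) {
            dest[ptr] = value;
        }
    }

    test "manual memset loop" {
        var buf: [4]u8 = undefined;
        memsetBytes(&buf, 0xaa);
        try std.testing.expectEqualSlices(u8, &.{ 0xaa, 0xaa, 0xaa, 0xaa }, buf[0..]);
    }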
 
-fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airArrayElemVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const array_ty = func.typeOf(bin_op.lhs);
-    const array = try func.resolveInst(bin_op.lhs);
-    const index = try func.resolveInst(bin_op.rhs);
+    const array_ty = cg.typeOf(bin_op.lhs);
+    const array = try cg.resolveInst(bin_op.lhs);
+    const index = try cg.resolveInst(bin_op.rhs);
     const elem_ty = array_ty.childType(zcu);
     const elem_size = elem_ty.abiSize(zcu);
 
-    if (isByRef(array_ty, pt, func.target)) {
-        try func.lowerToStack(array);
-        try func.emitWValue(index);
-        try func.addImm32(@intCast(elem_size));
-        try func.addTag(.i32_mul);
-        try func.addTag(.i32_add);
+    if (isByRef(array_ty, pt, cg.target)) {
+        try cg.lowerToStack(array);
+        try cg.emitWValue(index);
+        try cg.addImm32(@intCast(elem_size));
+        try cg.addTag(.i32_mul);
+        try cg.addTag(.i32_add);
     } else {
         assert(array_ty.zigTypeTag(zcu) == .vector);
 
@@ -4956,50 +4954,50 @@ fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
                 var operands = [_]u32{ @intFromEnum(opcode), @as(u8, @intCast(lane)) };
 
-                try func.emitWValue(array);
+                try cg.emitWValue(array);
 
-                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
-                try func.mir_extra.appendSlice(func.gpa, &operands);
-                try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+                const extra_index = @as(u32, @intCast(cg.mir_extra.items.len));
+                try cg.mir_extra.appendSlice(cg.gpa, &operands);
+                try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
 
-                return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+                return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
             },
             else => {
-                const stack_vec = try func.allocStack(array_ty);
-                try func.store(stack_vec, array, array_ty, 0);
+                const stack_vec = try cg.allocStack(array_ty);
+                try cg.store(stack_vec, array, array_ty, 0);
 
                 // Is a non-unrolled vector (v128)
-                try func.lowerToStack(stack_vec);
-                try func.emitWValue(index);
-                try func.addImm32(@intCast(elem_size));
-                try func.addTag(.i32_mul);
-                try func.addTag(.i32_add);
+                try cg.lowerToStack(stack_vec);
+                try cg.emitWValue(index);
+                try cg.addImm32(@intCast(elem_size));
+                try cg.addTag(.i32_mul);
+                try cg.addTag(.i32_add);
             },
         }
     }
 
-    const elem_result = if (isByRef(elem_ty, pt, func.target))
+    const elem_result = if (isByRef(elem_ty, pt, cg.target))
         .stack
     else
-        try func.load(.stack, elem_ty, 0);
+        try cg.load(.stack, elem_ty, 0);
 
-    return func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
 }
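
For the by-ref case above, the element address is computed directly on the wasm value stack as base + index * element size. A minimal sketch of that arithmetic in standalone Zig (illustrative names, not part of this file):

const std = @import("std");

// Illustrative only: the address arithmetic the emitted wasm performs
// for a by-ref array element (base pointer + index * element size).
fn elemAddress(base: u32, index: u32, elem_size: u32) u32 {
    return base + index * elem_size;
}

test "element address arithmetic" {
    // a hypothetical [4]u64 at linear-memory offset 1024; element 2 is at 1040
    try std.testing.expectEqual(@as(u32, 1040), elemAddress(1024, 2, 8));
}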
 
-fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airIntFromFloat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.typeOf(ty_op.operand);
-    const op_bits = op_ty.floatBits(func.target.*);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const op_ty = cg.typeOf(ty_op.operand);
+    const op_bits = op_ty.floatBits(cg.target.*);
 
-    const dest_ty = func.typeOfIndex(inst);
+    const dest_ty = cg.typeOfIndex(inst);
     const dest_info = dest_ty.intInfo(zcu);
 
     if (dest_info.bits > 128) {
-        return func.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits});
+        return cg.fail("TODO: intFromFloat for integers/floats with bitsize {}", .{dest_info.bits});
     }
 
     if ((op_bits != 32 and op_bits != 64) or dest_info.bits > 64) {
@@ -5015,36 +5013,36 @@ fn airIntFromFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             target_util.compilerRtIntAbbrev(dest_bitsize),
         }) catch unreachable;
 
-        const result = try func.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand});
-        return func.finishAir(inst, result, &.{ty_op.operand});
+        const result = try cg.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand});
+        return cg.finishAir(inst, result, &.{ty_op.operand});
     }
 
-    try func.emitWValue(operand);
+    try cg.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .trunc,
-        .valtype1 = typeToValtype(dest_ty, pt, func.target),
-        .valtype2 = typeToValtype(op_ty, pt, func.target),
+        .valtype1 = typeToValtype(dest_ty, pt, cg.target),
+        .valtype2 = typeToValtype(op_ty, pt, cg.target),
         .signedness = dest_info.signedness,
     });
-    try func.addTag(Mir.Inst.Tag.fromOpcode(op));
-    const result = try func.wrapOperand(.stack, dest_ty);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(op));
+    const result = try cg.wrapOperand(.stack, dest_ty);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
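
The wasm trunc opcodes used here round toward zero, which is also the semantics Zig exposes through @intFromFloat; only sources other than f32/f64, or destinations wider than 64 bits, take the compiler-rt path. A small standalone illustration (a test, not part of this file):

const std = @import("std");

test "intFromFloat truncates toward zero" {
    // wasm's i32.trunc_f64_s family, like Zig's @intFromFloat, discards
    // the fractional part rather than rounding to nearest.
    const x: f64 = -3.9;
    const n: i32 = @intFromFloat(x);
    try std.testing.expectEqual(@as(i32, -3), n);
}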
 
-fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airFloatFromInt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.typeOf(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const op_ty = cg.typeOf(ty_op.operand);
     const op_info = op_ty.intInfo(zcu);
 
-    const dest_ty = func.typeOfIndex(inst);
-    const dest_bits = dest_ty.floatBits(func.target.*);
+    const dest_ty = cg.typeOfIndex(inst);
+    const dest_bits = dest_ty.floatBits(cg.target.*);
 
     if (op_info.bits > 128) {
-        return func.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
+        return cg.fail("TODO: floatFromInt for integers/floats with bitsize {d} bits", .{op_info.bits});
     }
 
     if (op_info.bits > 64 or (dest_bits > 64 or dest_bits < 32)) {
@@ -5060,31 +5058,31 @@ fn airFloatFromInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             target_util.compilerRtFloatAbbrev(dest_bits),
         }) catch unreachable;
 
-        const result = try func.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand});
-        return func.finishAir(inst, result, &.{ty_op.operand});
+        const result = try cg.callIntrinsic(fn_name, &.{op_ty.ip_index}, dest_ty, &.{operand});
+        return cg.finishAir(inst, result, &.{ty_op.operand});
     }
 
-    try func.emitWValue(operand);
+    try cg.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .convert,
-        .valtype1 = typeToValtype(dest_ty, pt, func.target),
-        .valtype2 = typeToValtype(op_ty, pt, func.target),
+        .valtype1 = typeToValtype(dest_ty, pt, cg.target),
+        .valtype2 = typeToValtype(op_ty, pt, cg.target),
         .signedness = op_info.signedness,
     });
-    try func.addTag(Mir.Inst.Tag.fromOpcode(op));
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(op));
 
-    return func.finishAir(inst, .stack, &.{ty_op.operand});
+    return cg.finishAir(inst, .stack, &.{ty_op.operand});
 }
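
Symmetrically, integer-to-float conversion uses a native convert opcode whenever the source fits in 64 bits and the destination is f32 or f64; everything else goes through a compiler-rt helper. A standalone illustration of the fast-path case:

const std = @import("std");

test "floatFromInt on the native-opcode path" {
    // i64 -> f64 maps to a single f64.convert_i64_s; wider sources or
    // f16/f80/f128 destinations call compiler-rt instead.
    const n: i64 = -42;
    const f: f64 = @floatFromInt(n);
    try std.testing.expectEqual(@as(f64, -42.0), f);
}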
 
-fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airSplat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const operand = try func.resolveInst(ty_op.operand);
-    const ty = func.typeOfIndex(inst);
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const operand = try cg.resolveInst(ty_op.operand);
+    const ty = cg.typeOfIndex(inst);
     const elem_ty = ty.childType(zcu);
 
-    if (determineSimdStoreStrategy(ty, zcu, func.target) == .direct) blk: {
+    if (determineSimdStoreStrategy(ty, zcu, cg.target) == .direct) blk: {
         switch (operand) {
             // when the operand lives in the linear memory section, we can directly
             // load and splat the value at once, meaning we do not first have to load
@@ -5097,16 +5095,16 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     64 => @intFromEnum(std.wasm.SimdOpcode.v128_load64_splat),
                     else => break :blk, // Cannot make use of simd-instructions
                 };
-                try func.emitWValue(operand);
-                const extra_index: u32 = @intCast(func.mir_extra.items.len);
+                try cg.emitWValue(operand);
+                const extra_index: u32 = @intCast(cg.mir_extra.items.len);
                 // stores as := opcode, offset, alignment (opcode::memarg)
-                try func.mir_extra.appendSlice(func.gpa, &[_]u32{
+                try cg.mir_extra.appendSlice(cg.gpa, &[_]u32{
                     opcode,
                     operand.offset(),
                     @intCast(elem_ty.abiAlignment(zcu).toByteUnits().?),
                 });
-                try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
-                return func.finishAir(inst, .stack, &.{ty_op.operand});
+                try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+                return cg.finishAir(inst, .stack, &.{ty_op.operand});
             },
             .local => {
                 const opcode = switch (elem_ty.bitSize(zcu)) {
@@ -5116,11 +5114,11 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     64 => if (elem_ty.isInt(zcu)) @intFromEnum(std.wasm.SimdOpcode.i64x2_splat) else @intFromEnum(std.wasm.SimdOpcode.f64x2_splat),
                     else => break :blk, // Cannot make use of simd-instructions
                 };
-                try func.emitWValue(operand);
-                const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
-                try func.mir_extra.append(func.gpa, opcode);
-                try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
-                return func.finishAir(inst, .stack, &.{ty_op.operand});
+                try cg.emitWValue(operand);
+                const extra_index = @as(u32, @intCast(cg.mir_extra.items.len));
+                try cg.mir_extra.append(cg.gpa, opcode);
+                try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+                return cg.finishAir(inst, .stack, &.{ty_op.operand});
             },
             else => unreachable,
         }
@@ -5128,38 +5126,38 @@ fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const elem_size = elem_ty.bitSize(zcu);
     const vector_len = @as(usize, @intCast(ty.vectorLen(zcu)));
     if ((!std.math.isPowerOfTwo(elem_size) or elem_size % 8 != 0) and vector_len > 1) {
-        return func.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
+        return cg.fail("TODO: WebAssembly `@splat` for arbitrary element bitsize {d}", .{elem_size});
     }
 
-    const result = try func.allocStack(ty);
+    const result = try cg.allocStack(ty);
     const elem_byte_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
     var index: usize = 0;
     var offset: u32 = 0;
     while (index < vector_len) : (index += 1) {
-        try func.store(result, operand, elem_ty, offset);
+        try cg.store(result, operand, elem_ty, offset);
         offset += elem_byte_size;
     }
 
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
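
When no v128 splat instruction applies, the fallback above simply stores the scalar once per lane. The observable behavior matches Zig's @splat, as this standalone test sketches:

const std = @import("std");

test "splat replicates one scalar across every lane" {
    // Fast path: a single v128 load_splat / *.splat instruction; the
    // fallback loop instead stores the scalar once per lane.
    const v: @Vector(4, u32) = @splat(7);
    try std.testing.expectEqual(@as(u32, 7), v[0]);
    try std.testing.expectEqual(@as(u32, 7), v[3]);
}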
 
-fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const operand = try func.resolveInst(pl_op.operand);
+fn airSelect(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const operand = try cg.resolveInst(pl_op.operand);
 
     _ = operand;
-    return func.fail("TODO: Implement wasm airSelect", .{});
+    return cg.fail("TODO: Implement wasm airSelect", .{});
 }
 
-fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airShuffle(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const inst_ty = func.typeOfIndex(inst);
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.Shuffle, ty_pl.payload).data;
+    const inst_ty = cg.typeOfIndex(inst);
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.Shuffle, ty_pl.payload).data;
 
-    const a = try func.resolveInst(extra.a);
-    const b = try func.resolveInst(extra.b);
+    const a = try cg.resolveInst(extra.a);
+    const b = try cg.resolveInst(extra.b);
     const mask = Value.fromInterned(extra.mask);
     const mask_len = extra.mask_len;
 
@@ -5167,23 +5165,23 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const elem_size = child_ty.abiSize(zcu);
 
     // TODO: One of them could be by ref; handle in loop
-    if (isByRef(func.typeOf(extra.a), pt, func.target) or isByRef(inst_ty, pt, func.target)) {
-        const result = try func.allocStack(inst_ty);
+    if (isByRef(cg.typeOf(extra.a), pt, cg.target) or isByRef(inst_ty, pt, cg.target)) {
+        const result = try cg.allocStack(inst_ty);
 
         for (0..mask_len) |index| {
             const value = (try mask.elemValue(pt, index)).toSignedInt(zcu);
 
-            try func.emitWValue(result);
+            try cg.emitWValue(result);
 
             const loaded = if (value >= 0)
-                try func.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
+                try cg.load(a, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * value)))
             else
-                try func.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
+                try cg.load(b, child_ty, @as(u32, @intCast(@as(i64, @intCast(elem_size)) * ~value)));
 
-            try func.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
+            try cg.store(.stack, loaded, child_ty, result.stack_offset.value + @as(u32, @intCast(elem_size)) * @as(u32, @intCast(index)));
         }
 
-        return func.finishAir(inst, result, &.{ extra.a, extra.b });
+        return cg.finishAir(inst, result, &.{ extra.a, extra.b });
     } else {
         var operands = [_]u32{
             @intFromEnum(std.wasm.SimdOpcode.i8x16_shuffle),
@@ -5202,91 +5200,91 @@ fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
         }
 
-        try func.emitWValue(a);
-        try func.emitWValue(b);
+        try cg.emitWValue(a);
+        try cg.emitWValue(b);
 
-        const extra_index = @as(u32, @intCast(func.mir_extra.items.len));
-        try func.mir_extra.appendSlice(func.gpa, &operands);
-        try func.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
+        const extra_index = @as(u32, @intCast(cg.mir_extra.items.len));
+        try cg.mir_extra.appendSlice(cg.gpa, &operands);
+        try cg.addInst(.{ .tag = .simd_prefix, .data = .{ .payload = extra_index } });
 
-        return func.finishAir(inst, .stack, &.{ extra.a, extra.b });
+        return cg.finishAir(inst, .stack, &.{ extra.a, extra.b });
     }
 }
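
The by-ref loop decodes the mask convention used by AIR and by Zig's @shuffle: a non-negative mask element i selects a[i], while a negative element m selects b[~m], which is what the `~value` offset above computes. A standalone sketch of that convention:

const std = @import("std");

test "shuffle mask sign selects the source vector" {
    // A non-negative mask element i picks a[i]; a negative element m
    // picks b[~m] (-1 -> b[0], -2 -> b[1]).
    const a = @Vector(4, u8){ 1, 2, 3, 4 };
    const b = @Vector(4, u8){ 10, 20, 30, 40 };
    const r = @shuffle(u8, a, b, @Vector(4, i32){ 0, -1, 3, -2 });
    try std.testing.expect(@reduce(.And, r == @Vector(4, u8){ 1, 10, 4, 20 }));
}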
 
-fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const reduce = func.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
-    const operand = try func.resolveInst(reduce.operand);
+fn airReduce(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const reduce = cg.air.instructions.items(.data)[@intFromEnum(inst)].reduce;
+    const operand = try cg.resolveInst(reduce.operand);
 
     _ = operand;
-    return func.fail("TODO: Implement wasm airReduce", .{});
+    return cg.fail("TODO: Implement wasm airReduce", .{});
 }
 
-fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airAggregateInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const result_ty = func.typeOfIndex(inst);
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const result_ty = cg.typeOfIndex(inst);
     const len = @as(usize, @intCast(result_ty.arrayLen(zcu)));
-    const elements = @as([]const Air.Inst.Ref, @ptrCast(func.air.extra[ty_pl.payload..][0..len]));
+    const elements = @as([]const Air.Inst.Ref, @ptrCast(cg.air.extra[ty_pl.payload..][0..len]));
 
     const result: WValue = result_value: {
         switch (result_ty.zigTypeTag(zcu)) {
             .array => {
-                const result = try func.allocStack(result_ty);
+                const result = try cg.allocStack(result_ty);
                 const elem_ty = result_ty.childType(zcu);
                 const elem_size = @as(u32, @intCast(elem_ty.abiSize(zcu)));
                 const sentinel = if (result_ty.sentinel(zcu)) |sent| blk: {
-                    break :blk try func.lowerConstant(sent, elem_ty);
+                    break :blk try cg.lowerConstant(sent, elem_ty);
                 } else null;
 
                 // When the element type is by reference, we must copy the entire
                 // value. It is therefore safer to move the offset pointer and store
                 // each value individually, instead of using store offsets.
-                if (isByRef(elem_ty, pt, func.target)) {
+                if (isByRef(elem_ty, pt, cg.target)) {
                     // copy stack pointer into a temporary local, which is
                     // moved for each element to store each value in the right position.
-                    const offset = try func.buildPointerOffset(result, 0, .new);
+                    const offset = try cg.buildPointerOffset(result, 0, .new);
                     for (elements, 0..) |elem, elem_index| {
-                        const elem_val = try func.resolveInst(elem);
-                        try func.store(offset, elem_val, elem_ty, 0);
+                        const elem_val = try cg.resolveInst(elem);
+                        try cg.store(offset, elem_val, elem_ty, 0);
 
                         if (elem_index < elements.len - 1 and sentinel == null) {
-                            _ = try func.buildPointerOffset(offset, elem_size, .modify);
+                            _ = try cg.buildPointerOffset(offset, elem_size, .modify);
                         }
                     }
                     if (sentinel) |sent| {
-                        try func.store(offset, sent, elem_ty, 0);
+                        try cg.store(offset, sent, elem_ty, 0);
                     }
                 } else {
                     var offset: u32 = 0;
                     for (elements) |elem| {
-                        const elem_val = try func.resolveInst(elem);
-                        try func.store(result, elem_val, elem_ty, offset);
+                        const elem_val = try cg.resolveInst(elem);
+                        try cg.store(result, elem_val, elem_ty, offset);
                         offset += elem_size;
                     }
                     if (sentinel) |sent| {
-                        try func.store(result, sent, elem_ty, offset);
+                        try cg.store(result, sent, elem_ty, offset);
                     }
                 }
                 break :result_value result;
             },
             .@"struct" => switch (result_ty.containerLayout(zcu)) {
                 .@"packed" => {
-                    if (isByRef(result_ty, pt, func.target)) {
-                        return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
+                    if (isByRef(result_ty, pt, cg.target)) {
+                        return cg.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
                     }
                     const packed_struct = zcu.typeToPackedStruct(result_ty).?;
                     const field_types = packed_struct.field_types;
                     const backing_type = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
 
                     // ensure the result is zero'd
-                    const result = try func.allocLocal(backing_type);
+                    const result = try cg.allocLocal(backing_type);
                     if (backing_type.bitSize(zcu) <= 32)
-                        try func.addImm32(0)
+                        try cg.addImm32(0)
                     else
-                        try func.addImm64(0);
-                    try func.addLabel(.local_set, result.local.value);
+                        try cg.addImm64(0);
+                    try cg.addLabel(.local_set, result.local.value);
 
                     var current_bit: u16 = 0;
                     for (elements, 0..) |elem, elem_index| {
@@ -5298,46 +5296,46 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                         else
                             .{ .imm64 = current_bit };
 
-                        const value = try func.resolveInst(elem);
+                        const value = try cg.resolveInst(elem);
                         const value_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
                         const int_ty = try pt.intType(.unsigned, value_bit_size);
 
                         // load our current result onto the stack so we can perform all
                         // transformations using only stack values, saving loads and stores
-                        try func.emitWValue(result);
-                        const bitcasted = try func.bitcast(int_ty, field_ty, value);
-                        const extended_val = try func.intcast(bitcasted, int_ty, backing_type);
+                        try cg.emitWValue(result);
+                        const bitcasted = try cg.bitcast(int_ty, field_ty, value);
+                        const extended_val = try cg.intcast(bitcasted, int_ty, backing_type);
                         // no need to shift any values when the current offset is 0
                         const shifted = if (current_bit != 0) shifted: {
-                            break :shifted try func.binOp(extended_val, shift_val, backing_type, .shl);
+                            break :shifted try cg.binOp(extended_val, shift_val, backing_type, .shl);
                         } else extended_val;
                         // we ignore the result as we keep it on the stack to assign it directly to `result`
-                        _ = try func.binOp(.stack, shifted, backing_type, .@"or");
-                        try func.addLabel(.local_set, result.local.value);
+                        _ = try cg.binOp(.stack, shifted, backing_type, .@"or");
+                        try cg.addLabel(.local_set, result.local.value);
                         current_bit += value_bit_size;
                     }
                     break :result_value result;
                 },
                 else => {
-                    const result = try func.allocStack(result_ty);
-                    const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
+                    const result = try cg.allocStack(result_ty);
+                    const offset = try cg.buildPointerOffset(result, 0, .new); // pointer to offset
                     var prev_field_offset: u64 = 0;
                     for (elements, 0..) |elem, elem_index| {
                         if (try result_ty.structFieldValueComptime(pt, elem_index) != null) continue;
 
                         const elem_ty = result_ty.fieldType(elem_index, zcu);
                         const field_offset = result_ty.structFieldOffset(elem_index, zcu);
-                        _ = try func.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
+                        _ = try cg.buildPointerOffset(offset, @intCast(field_offset - prev_field_offset), .modify);
                         prev_field_offset = field_offset;
 
-                        const value = try func.resolveInst(elem);
-                        try func.store(offset, value, elem_ty, 0);
+                        const value = try cg.resolveInst(elem);
+                        try cg.store(offset, value, elem_ty, 0);
                     }
 
                     break :result_value result;
                 },
             },
-            .vector => return func.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
+            .vector => return cg.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
             else => unreachable,
         }
     };
@@ -5345,22 +5343,22 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     if (elements.len <= Liveness.bpi - 1) {
         var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
         @memcpy(buf[0..elements.len], elements);
-        return func.finishAir(inst, result, &buf);
+        return cg.finishAir(inst, result, &buf);
     }
-    var bt = try func.iterateBigTomb(inst, elements.len);
+    var bt = try cg.iterateBigTomb(inst, elements.len);
     for (elements) |arg| bt.feed(arg);
     return bt.finishAir(result);
 }
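
For packed structs, the loop above builds the backing integer by zero-extending each field, shifting it to its bit offset, and or-ing it into the accumulator. The end result is bit-identical to a plain @bitCast of the packed struct, as this standalone test sketches:

const std = @import("std");

test "packed struct init shifts each field into a backing integer" {
    // a occupies bits 0..3, b bits 4..7, c bits 8..15, so the backing
    // u16 is a | (b << 4) | (c << 8).
    const S = packed struct { a: u4, b: u4, c: u8 };
    const s = S{ .a = 0xA, .b = 0xB, .c = 0xCD };
    try std.testing.expectEqual(@as(u16, 0xCDBA), @as(u16, @bitCast(s)));
}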
 
-fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airUnionInit(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.UnionInit, ty_pl.payload).data;
 
     const result = result: {
-        const union_ty = func.typeOfIndex(inst);
+        const union_ty = cg.typeOfIndex(inst);
         const layout = union_ty.unionGetLayout(zcu);
         const union_obj = zcu.typeToUnion(union_ty).?;
         const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
@@ -5370,34 +5368,34 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const tag_ty = union_ty.unionTagTypeHypothetical(zcu);
             const enum_field_index = tag_ty.enumFieldIndex(field_name, zcu).?;
             const tag_val = try pt.enumValueFieldIndex(tag_ty, enum_field_index);
-            break :blk try func.lowerConstant(tag_val, tag_ty);
+            break :blk try cg.lowerConstant(tag_val, tag_ty);
         };
         if (layout.payload_size == 0) {
             if (layout.tag_size == 0) {
                 break :result .none;
             }
-            assert(!isByRef(union_ty, pt, func.target));
+            assert(!isByRef(union_ty, pt, cg.target));
             break :result tag_int;
         }
 
-        if (isByRef(union_ty, pt, func.target)) {
-            const result_ptr = try func.allocStack(union_ty);
-            const payload = try func.resolveInst(extra.init);
+        if (isByRef(union_ty, pt, cg.target)) {
+            const result_ptr = try cg.allocStack(union_ty);
+            const payload = try cg.resolveInst(extra.init);
             if (layout.tag_align.compare(.gte, layout.payload_align)) {
-                if (isByRef(field_ty, pt, func.target)) {
-                    const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
-                    try func.store(payload_ptr, payload, field_ty, 0);
+                if (isByRef(field_ty, pt, cg.target)) {
+                    const payload_ptr = try cg.buildPointerOffset(result_ptr, layout.tag_size, .new);
+                    try cg.store(payload_ptr, payload, field_ty, 0);
                 } else {
-                    try func.store(result_ptr, payload, field_ty, @intCast(layout.tag_size));
+                    try cg.store(result_ptr, payload, field_ty, @intCast(layout.tag_size));
                 }
 
                 if (layout.tag_size > 0) {
-                    try func.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0);
+                    try cg.store(result_ptr, tag_int, Type.fromInterned(union_obj.enum_tag_ty), 0);
                 }
             } else {
-                try func.store(result_ptr, payload, field_ty, 0);
+                try cg.store(result_ptr, payload, field_ty, 0);
                 if (layout.tag_size > 0) {
-                    try func.store(
+                    try cg.store(
                         result_ptr,
                         tag_int,
                         Type.fromInterned(union_obj.enum_tag_ty),
@@ -5407,46 +5405,46 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
             break :result result_ptr;
         } else {
-            const operand = try func.resolveInst(extra.init);
+            const operand = try cg.resolveInst(extra.init);
             const union_int_type = try pt.intType(.unsigned, @as(u16, @intCast(union_ty.bitSize(zcu))));
             if (field_ty.zigTypeTag(zcu) == .float) {
                 const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
-                const bitcasted = try func.bitcast(field_ty, int_type, operand);
-                break :result try func.trunc(bitcasted, int_type, union_int_type);
+                const bitcasted = try cg.bitcast(field_ty, int_type, operand);
+                break :result try cg.trunc(bitcasted, int_type, union_int_type);
             } else if (field_ty.isPtrAtRuntime(zcu)) {
                 const int_type = try pt.intType(.unsigned, @intCast(field_ty.bitSize(zcu)));
-                break :result try func.intcast(operand, int_type, union_int_type);
+                break :result try cg.intcast(operand, int_type, union_int_type);
             }
-            break :result try func.intcast(operand, field_ty, union_int_type);
+            break :result try cg.intcast(operand, field_ty, union_int_type);
         }
     };
 
-    return func.finishAir(inst, result, &.{extra.init});
+    return cg.finishAir(inst, result, &.{extra.init});
 }
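
The placement logic above orders tag and payload by alignment: the more-aligned member comes first. A simplified standalone sketch of that rule (illustrative helper; it ignores the padding a real layout may insert):

const std = @import("std");

// Illustrative only: where the payload begins relative to the union's base.
fn payloadOffset(tag_align: u32, payload_align: u32, tag_size: u32) u32 {
    // Tag at least as aligned as the payload: tag first, payload after it.
    // Otherwise: payload first, tag stored behind the payload.
    return if (tag_align >= payload_align) tag_size else 0;
}

test "payload placement follows the tag/payload alignment rule" {
    try std.testing.expectEqual(@as(u32, 4), payloadOffset(4, 1, 4));
    try std.testing.expectEqual(@as(u32, 0), payloadOffset(1, 8, 4));
}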
 
-fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const prefetch = func.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
-    return func.finishAir(inst, .none, &.{prefetch.ptr});
+fn airPrefetch(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const prefetch = cg.air.instructions.items(.data)[@intFromEnum(inst)].prefetch;
+    return cg.finishAir(inst, .none, &.{prefetch.ptr});
 }
 
-fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+fn airWasmMemorySize(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
 
-    try func.addLabel(.memory_size, pl_op.payload);
-    return func.finishAir(inst, .stack, &.{pl_op.operand});
+    try cg.addLabel(.memory_size, pl_op.payload);
+    return cg.finishAir(inst, .stack, &.{pl_op.operand});
 }
 
-fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+fn airWasmMemoryGrow(cg: *CodeGen, inst: Air.Inst.Index) !void {
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
 
-    const operand = try func.resolveInst(pl_op.operand);
-    try func.emitWValue(operand);
-    try func.addLabel(.memory_grow, pl_op.payload);
-    return func.finishAir(inst, .stack, &.{pl_op.operand});
+    const operand = try cg.resolveInst(pl_op.operand);
+    try cg.emitWValue(operand);
+    try cg.addLabel(.memory_grow, pl_op.payload);
+    return cg.finishAir(inst, .stack, &.{pl_op.operand});
 }
 
-fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    const pt = func.pt;
+fn cmpOptionals(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     assert(operand_ty.hasRuntimeBitsIgnoreComptime(zcu));
     assert(op == .eq or op == .neq);
@@ -5454,91 +5452,91 @@ fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op:
 
     // We store the final result here; it is set to 1 only when
     // the optionals compare truly equal.
-    var result = try func.ensureAllocLocal(Type.i32);
-    defer result.free(func);
-
-    try func.startBlock(.block, std.wasm.block_empty);
-    _ = try func.isNull(lhs, operand_ty, .i32_eq);
-    _ = try func.isNull(rhs, operand_ty, .i32_eq);
-    try func.addTag(.i32_ne); // inverse so we can exit early
-    try func.addLabel(.br_if, 0);
-
-    _ = try func.load(lhs, payload_ty, 0);
-    _ = try func.load(rhs, payload_ty, 0);
-    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, func.target) });
-    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
-    try func.addLabel(.br_if, 0);
-
-    try func.addImm32(1);
-    try func.addLabel(.local_set, result.local.value);
-    try func.endBlock();
-
-    try func.emitWValue(result);
-    try func.addImm32(0);
-    try func.addTag(if (op == .eq) .i32_ne else .i32_eq);
+    var result = try cg.ensureAllocLocal(Type.i32);
+    defer result.free(cg);
+
+    try cg.startBlock(.block, std.wasm.block_empty);
+    _ = try cg.isNull(lhs, operand_ty, .i32_eq);
+    _ = try cg.isNull(rhs, operand_ty, .i32_eq);
+    try cg.addTag(.i32_ne); // inverse so we can exit early
+    try cg.addLabel(.br_if, 0);
+
+    _ = try cg.load(lhs, payload_ty, 0);
+    _ = try cg.load(rhs, payload_ty, 0);
+    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, pt, cg.target) });
+    try cg.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try cg.addLabel(.br_if, 0);
+
+    try cg.addImm32(1);
+    try cg.addLabel(.local_set, result.local.value);
+    try cg.endBlock();
+
+    try cg.emitWValue(result);
+    try cg.addImm32(0);
+    try cg.addTag(if (op == .eq) .i32_ne else .i32_eq);
     return .stack;
 }
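
The generated block compares the null bits first and exits early on a mismatch; the payloads are only compared when both sides are non-null. Observably this is ordinary optional equality, as in this standalone sketch:

const std = @import("std");

test "optionals are equal when null bits and payloads agree" {
    const a: ?u32 = 5;
    const b: ?u32 = 5;
    const c: ?u32 = null;
    // std.meta.eql checks the null bits first, then the payloads,
    // mirroring the early-exit structure of the block above.
    try std.testing.expect(std.meta.eql(a, b));
    try std.testing.expect(!std.meta.eql(a, c));
}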
 
 /// Compares big integers by checking both their high and low bits.
 /// NOTE: Leaves the result of the comparison on top of the stack.
 /// TODO: Lower this to compiler_rt call when bitsize > 128
-fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    const pt = func.pt;
+fn cmpBigInt(cg: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     assert(operand_ty.abiSize(zcu) >= 16);
     assert(!(lhs != .stack and rhs == .stack));
     if (operand_ty.bitSize(zcu) > 128) {
-        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)});
+        return cg.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.bitSize(zcu)});
     }
 
-    var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
-    defer lhs_msb.free(func);
-    var rhs_msb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64);
-    defer rhs_msb.free(func);
+    var lhs_msb = try (try cg.load(lhs, Type.u64, 8)).toLocal(cg, Type.u64);
+    defer lhs_msb.free(cg);
+    var rhs_msb = try (try cg.load(rhs, Type.u64, 8)).toLocal(cg, Type.u64);
+    defer rhs_msb.free(cg);
 
     switch (op) {
         .eq, .neq => {
-            const xor_high = try func.binOp(lhs_msb, rhs_msb, Type.u64, .xor);
-            const lhs_lsb = try func.load(lhs, Type.u64, 0);
-            const rhs_lsb = try func.load(rhs, Type.u64, 0);
-            const xor_low = try func.binOp(lhs_lsb, rhs_lsb, Type.u64, .xor);
-            const or_result = try func.binOp(xor_high, xor_low, Type.u64, .@"or");
+            const xor_high = try cg.binOp(lhs_msb, rhs_msb, Type.u64, .xor);
+            const lhs_lsb = try cg.load(lhs, Type.u64, 0);
+            const rhs_lsb = try cg.load(rhs, Type.u64, 0);
+            const xor_low = try cg.binOp(lhs_lsb, rhs_lsb, Type.u64, .xor);
+            const or_result = try cg.binOp(xor_high, xor_low, Type.u64, .@"or");
 
             switch (op) {
-                .eq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq),
-                .neq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq),
+                .eq => return cg.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq),
+                .neq => return cg.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq),
                 else => unreachable,
             }
         },
         else => {
             const ty = if (operand_ty.isSignedInt(zcu)) Type.i64 else Type.u64;
             // leave these values on top of the stack for '.select'
-            const lhs_lsb = try func.load(lhs, Type.u64, 0);
-            const rhs_lsb = try func.load(rhs, Type.u64, 0);
-            _ = try func.cmp(lhs_lsb, rhs_lsb, Type.u64, op);
-            _ = try func.cmp(lhs_msb, rhs_msb, ty, op);
-            _ = try func.cmp(lhs_msb, rhs_msb, ty, .eq);
-            try func.addTag(.select);
+            const lhs_lsb = try cg.load(lhs, Type.u64, 0);
+            const rhs_lsb = try cg.load(rhs, Type.u64, 0);
+            _ = try cg.cmp(lhs_lsb, rhs_lsb, Type.u64, op);
+            _ = try cg.cmp(lhs_msb, rhs_msb, ty, op);
+            _ = try cg.cmp(lhs_msb, rhs_msb, ty, .eq);
+            try cg.addTag(.select);
         },
     }
 
     return .stack;
 }
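
Both strategies decompose the 128-bit operands into 64-bit halves: equality folds the xor of both halves together, and ordered comparisons use `select` to pick the low-half result when the high halves tie. A standalone sketch of the same decomposition (illustrative helpers, not part of this backend):

const std = @import("std");

fn eq128(lhs: u128, rhs: u128) bool {
    const lhs_hi: u64 = @intCast(lhs >> 64);
    const rhs_hi: u64 = @intCast(rhs >> 64);
    const lhs_lo: u64 = @truncate(lhs);
    const rhs_lo: u64 = @truncate(rhs);
    // the xor/or trick from the .eq/.neq branch: both halves are equal
    // iff (hi ^ hi') | (lo ^ lo') == 0
    return ((lhs_hi ^ rhs_hi) | (lhs_lo ^ rhs_lo)) == 0;
}

fn lt128(lhs: u128, rhs: u128) bool {
    const lhs_hi: u64 = @intCast(lhs >> 64);
    const rhs_hi: u64 = @intCast(rhs >> 64);
    const lhs_lo: u64 = @truncate(lhs);
    const rhs_lo: u64 = @truncate(rhs);
    // wasm `select` keeps the low-half comparison when the high halves
    // are equal, and the high-half comparison otherwise
    return if (lhs_hi == rhs_hi) lhs_lo < rhs_lo else lhs_hi < rhs_hi;
}

test "128-bit comparisons decompose into 64-bit halves" {
    try std.testing.expect(eq128((1 << 64) | 7, (1 << 64) | 7));
    try std.testing.expect(!eq128(1 << 64, 1));
    try std.testing.expect(lt128(5, 1 << 64));
    try std.testing.expect(!lt128(1 << 64, 5));
}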
 
-fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airSetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const un_ty = func.typeOf(bin_op.lhs).childType(zcu);
-    const tag_ty = func.typeOf(bin_op.rhs);
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const un_ty = cg.typeOf(bin_op.lhs).childType(zcu);
+    const tag_ty = cg.typeOf(bin_op.rhs);
     const layout = un_ty.unionGetLayout(zcu);
-    if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    if (layout.tag_size == 0) return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const union_ptr = try func.resolveInst(bin_op.lhs);
-    const new_tag = try func.resolveInst(bin_op.rhs);
+    const union_ptr = try cg.resolveInst(bin_op.lhs);
+    const new_tag = try cg.resolveInst(bin_op.rhs);
     if (layout.payload_size == 0) {
-        try func.store(union_ptr, new_tag, tag_ty, 0);
-        return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+        try cg.store(union_ptr, new_tag, tag_ty, 0);
+        return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
     }
 
     // when the tag alignment is smaller than the payload alignment, the tag will be stored
@@ -5546,52 +5544,52 @@ fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align)) blk: {
         break :blk @intCast(layout.payload_size);
     } else 0;
-    try func.store(union_ptr, new_tag, tag_ty, offset);
-    return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    try cg.store(union_ptr, new_tag, tag_ty, offset);
+    return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const zcu = func.pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airGetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const zcu = cg.pt.zcu;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const un_ty = func.typeOf(ty_op.operand);
-    const tag_ty = func.typeOfIndex(inst);
+    const un_ty = cg.typeOf(ty_op.operand);
+    const tag_ty = cg.typeOfIndex(inst);
     const layout = un_ty.unionGetLayout(zcu);
-    if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
+    if (layout.tag_size == 0) return cg.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     // when the tag alignment is smaller than the payload alignment,
     // the tag will be stored after the payload.
     const offset: u32 = if (layout.tag_align.compare(.lt, layout.payload_align))
         @intCast(layout.payload_size)
     else
         0;
-    const result = try func.load(operand, tag_ty, offset);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    const result = try cg.load(operand, tag_ty, offset);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airFpext(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const dest_ty = func.typeOfIndex(inst);
-    const operand = try func.resolveInst(ty_op.operand);
-    const result = try func.fpext(operand, func.typeOf(ty_op.operand), dest_ty);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    const dest_ty = cg.typeOfIndex(inst);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const result = try cg.fpext(operand, cg.typeOf(ty_op.operand), dest_ty);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Extends a float from a given `Type` to a larger wanted `Type`
 /// NOTE: Leaves the result on the stack
-fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_bits = given.floatBits(func.target.*);
-    const wanted_bits = wanted.floatBits(func.target.*);
+fn fpext(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_bits = given.floatBits(cg.target.*);
+    const wanted_bits = wanted.floatBits(cg.target.*);
 
     if (wanted_bits == 64 and given_bits == 32) {
-        try func.emitWValue(operand);
-        try func.addTag(.f64_promote_f32);
+        try cg.emitWValue(operand);
+        try cg.addTag(.f64_promote_f32);
         return .stack;
     } else if (given_bits == 16 and wanted_bits <= 64) {
         // call __extendhfsf2(f16) f32
-        const f32_result = try func.callIntrinsic(
+        const f32_result = try cg.callIntrinsic(
             "__extendhfsf2",
             &.{.f16_type},
             Type.f32,
@@ -5600,7 +5598,7 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
         assert(f32_result == .stack);
 
         if (wanted_bits == 64) {
-            try func.addTag(.f64_promote_f32);
+            try cg.addTag(.f64_promote_f32);
         }
         return .stack;
     }
@@ -5611,37 +5609,37 @@ fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!
         target_util.compilerRtFloatAbbrev(wanted_bits),
     }) catch unreachable;
 
-    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
+    return cg.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
 }
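
Widening a float never loses information: f32 to f64 is a single promote instruction, while an f16 source is first extended to f32 through __extendhfsf2 and then promoted if needed. A standalone illustration:

const std = @import("std");

test "float widening is exact" {
    // 1.5 is exactly representable in f16, so the widened value is
    // bit-for-bit the same number as f64.
    const h: f16 = 1.5;
    const d: f64 = @floatCast(h);
    try std.testing.expectEqual(@as(f64, 1.5), d);
}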
 
-fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+fn airFptrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const dest_ty = func.typeOfIndex(inst);
-    const operand = try func.resolveInst(ty_op.operand);
-    const result = try func.fptrunc(operand, func.typeOf(ty_op.operand), dest_ty);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    const dest_ty = cg.typeOfIndex(inst);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const result = try cg.fptrunc(operand, cg.typeOf(ty_op.operand), dest_ty);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Truncates a float from a given `Type` to a smaller wanted `Type`
 /// NOTE: The result value remains on the stack
-fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_bits = given.floatBits(func.target.*);
-    const wanted_bits = wanted.floatBits(func.target.*);
+fn fptrunc(cg: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_bits = given.floatBits(cg.target.*);
+    const wanted_bits = wanted.floatBits(cg.target.*);
 
     if (wanted_bits == 32 and given_bits == 64) {
-        try func.emitWValue(operand);
-        try func.addTag(.f32_demote_f64);
+        try cg.emitWValue(operand);
+        try cg.addTag(.f32_demote_f64);
         return .stack;
     } else if (wanted_bits == 16 and given_bits <= 64) {
         const op: WValue = if (given_bits == 64) blk: {
-            try func.emitWValue(operand);
-            try func.addTag(.f32_demote_f64);
+            try cg.emitWValue(operand);
+            try cg.addTag(.f32_demote_f64);
             break :blk .stack;
         } else operand;
 
         // call __truncsfhf2(f32) f16
-        return func.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
+        return cg.callIntrinsic("__truncsfhf2", &.{.f32_type}, Type.f16, &.{op});
     }
 
     var fn_name_buf: [12]u8 = undefined;
@@ -5650,20 +5648,20 @@ fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerErro
         target_util.compilerRtFloatAbbrev(wanted_bits),
     }) catch unreachable;
 
-    return func.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
+    return cg.callIntrinsic(fn_name, &.{given.ip_index}, wanted, &.{operand});
 }
 
-fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airErrUnionPayloadPtrSet(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const err_set_ty = func.typeOf(ty_op.operand).childType(zcu);
+    const err_set_ty = cg.typeOf(ty_op.operand).childType(zcu);
     const payload_ty = err_set_ty.errorUnionPayload(zcu);
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
 
     // set the error tag to '0' to mark the error union as non-error
-    try func.store(
+    try cg.store(
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
@@ -5672,63 +5670,63 @@ fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!voi
 
     const result = result: {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
-            break :result func.reuseOperand(ty_op.operand, operand);
+            break :result cg.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try func.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
+        break :result try cg.buildPointerOffset(operand, @as(u32, @intCast(errUnionPayloadOffset(payload_ty, zcu))), .new);
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
 
-    const field_ptr = try func.resolveInst(extra.field_ptr);
+    const field_ptr = try cg.resolveInst(extra.field_ptr);
     const parent_ty = ty_pl.ty.toType().childType(zcu);
     const field_offset = parent_ty.structFieldOffset(extra.field_index, zcu);
 
     const result = if (field_offset != 0) result: {
-        const base = try func.buildPointerOffset(field_ptr, 0, .new);
-        try func.addLabel(.local_get, base.local.value);
-        try func.addImm32(@intCast(field_offset));
-        try func.addTag(.i32_sub);
-        try func.addLabel(.local_set, base.local.value);
+        const base = try cg.buildPointerOffset(field_ptr, 0, .new);
+        try cg.addLabel(.local_get, base.local.value);
+        try cg.addImm32(@intCast(field_offset));
+        try cg.addTag(.i32_sub);
+        try cg.addLabel(.local_set, base.local.value);
         break :result base;
-    } else func.reuseOperand(extra.field_ptr, field_ptr);
+    } else cg.reuseOperand(extra.field_ptr, field_ptr);
 
-    return func.finishAir(inst, result, &.{extra.field_ptr});
+    return cg.finishAir(inst, result, &.{extra.field_ptr});
 }
 
-fn sliceOrArrayPtr(func: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
-    const pt = func.pt;
+fn sliceOrArrayPtr(cg: *CodeGen, ptr: WValue, ptr_ty: Type) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (ptr_ty.isSlice(zcu)) {
-        return func.slicePtr(ptr);
+        return cg.slicePtr(ptr);
     } else {
         return ptr;
     }
 }
 
-fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airMemcpy(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
-    const dst = try func.resolveInst(bin_op.lhs);
-    const dst_ty = func.typeOf(bin_op.lhs);
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const dst = try cg.resolveInst(bin_op.lhs);
+    const dst_ty = cg.typeOf(bin_op.lhs);
     const ptr_elem_ty = dst_ty.childType(zcu);
-    const src = try func.resolveInst(bin_op.rhs);
-    const src_ty = func.typeOf(bin_op.rhs);
+    const src = try cg.resolveInst(bin_op.rhs);
+    const src_ty = cg.typeOf(bin_op.rhs);
     const len = switch (dst_ty.ptrSize(zcu)) {
         .Slice => blk: {
-            const slice_len = try func.sliceLen(dst);
+            const slice_len = try cg.sliceLen(dst);
             if (ptr_elem_ty.abiSize(zcu) != 1) {
-                try func.emitWValue(slice_len);
-                try func.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
-                try func.addTag(.i32_mul);
-                try func.addLabel(.local_set, slice_len.local.value);
+                try cg.emitWValue(slice_len);
+                try cg.emitWValue(.{ .imm32 = @as(u32, @intCast(ptr_elem_ty.abiSize(zcu))) });
+                try cg.addTag(.i32_mul);
+                try cg.addLabel(.local_set, slice_len.local.value);
             }
             break :blk slice_len;
         },
@@ -5737,94 +5735,94 @@ fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         }),
         .C, .Many => unreachable,
     };
-    const dst_ptr = try func.sliceOrArrayPtr(dst, dst_ty);
-    const src_ptr = try func.sliceOrArrayPtr(src, src_ty);
-    try func.memcpy(dst_ptr, src_ptr, len);
+    const dst_ptr = try cg.sliceOrArrayPtr(dst, dst_ty);
+    const src_ptr = try cg.sliceOrArrayPtr(src, src_ty);
+    try cg.memcpy(dst_ptr, src_ptr, len);
 
-    return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
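
Because the underlying copy operates on bytes, a slice destination's element count is first scaled by the ABI element size. At the language level this is invisible, as this standalone test sketches:

const std = @import("std");

test "memcpy copies whole elements" {
    const src = [_]u32{ 1, 2, 3, 4 };
    var dst: [4]u32 = undefined;
    // lowered as a byte copy of len * @sizeOf(u32) bytes
    @memcpy(dst[0..], src[0..]);
    try std.testing.expectEqualSlices(u32, src[0..], dst[0..]);
}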
 
-fn airRetAddr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+fn airRetAddr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // TODO: Implement this properly once stack serialization is solved
-    return func.finishAir(inst, switch (func.ptr_size) {
+    return cg.finishAir(inst, switch (cg.ptr_size) {
         .wasm32 => .{ .imm32 = 0 },
         .wasm64 => .{ .imm64 = 0 },
     }, &.{});
 }
 
-fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airPopcount(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const op_ty = func.typeOf(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const op_ty = cg.typeOf(ty_op.operand);
 
     if (op_ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement @popCount for vectors", .{});
+        return cg.fail("TODO: Implement @popCount for vectors", .{});
     }
 
     const int_info = op_ty.intInfo(zcu);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
-        return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
+        return cg.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
     };
 
     switch (wasm_bits) {
         32 => {
-            try func.emitWValue(operand);
+            try cg.emitWValue(operand);
             if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
-                _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
+                _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits));
             }
-            try func.addTag(.i32_popcnt);
+            try cg.addTag(.i32_popcnt);
         },
         64 => {
-            try func.emitWValue(operand);
+            try cg.emitWValue(operand);
             if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
-                _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits));
+                _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits));
             }
-            try func.addTag(.i64_popcnt);
-            try func.addTag(.i32_wrap_i64);
-            try func.emitWValue(operand);
+            try cg.addTag(.i64_popcnt);
+            try cg.addTag(.i32_wrap_i64);
+            try cg.emitWValue(operand);
         },
         128 => {
-            _ = try func.load(operand, Type.u64, 0);
-            try func.addTag(.i64_popcnt);
-            _ = try func.load(operand, Type.u64, 8);
+            _ = try cg.load(operand, Type.u64, 0);
+            try cg.addTag(.i64_popcnt);
+            _ = try cg.load(operand, Type.u64, 8);
             if (op_ty.isSignedInt(zcu) and bits != wasm_bits) {
-                _ = try func.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
+                _ = try cg.wrapOperand(.stack, try pt.intType(.unsigned, bits - 64));
             }
-            try func.addTag(.i64_popcnt);
-            try func.addTag(.i64_add);
-            try func.addTag(.i32_wrap_i64);
+            try cg.addTag(.i64_popcnt);
+            try cg.addTag(.i64_add);
+            try cg.addTag(.i32_wrap_i64);
         },
         else => unreachable,
     }
 
-    return func.finishAir(inst, .stack, &.{ty_op.operand});
+    return cg.finishAir(inst, .stack, &.{ty_op.operand});
 }
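
The 128-bit branch counts both 64-bit halves separately and adds the results. The same decomposition in standalone Zig (illustrative helper, not part of this backend):

const std = @import("std");

fn popCount128(x: u128) u32 {
    // popcount each 64-bit half independently, then add the two counts
    const lo: u64 = @truncate(x);
    const hi: u64 = @intCast(x >> 64);
    return @as(u32, @popCount(lo)) + @popCount(hi);
}

test "popCount128 matches @popCount" {
    const x: u128 = (1 << 127) | 0xFF;
    try std.testing.expectEqual(@as(u32, @popCount(x)), popCount128(x));
}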
 
-fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airBitReverse(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
-    const ty = func.typeOf(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
+    const ty = cg.typeOf(ty_op.operand);
 
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement @bitReverse for vectors", .{});
+        return cg.fail("TODO: Implement @bitReverse for vectors", .{});
     }
 
     const int_info = ty.intInfo(zcu);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
-        return func.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits});
+        return cg.fail("TODO: Implement @bitReverse for integers with bitsize '{d}'", .{bits});
     };
 
     switch (wasm_bits) {
         32 => {
-            const intrin_ret = try func.callIntrinsic(
+            const intrin_ret = try cg.callIntrinsic(
                 "__bitreversesi2",
                 &.{.u32_type},
                 Type.u32,
@@ -5833,11 +5831,11 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const result = if (bits == 32)
                 intrin_ret
             else
-                try func.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr);
-            return func.finishAir(inst, result, &.{ty_op.operand});
+                try cg.binOp(intrin_ret, .{ .imm32 = 32 - bits }, ty, .shr);
+            return cg.finishAir(inst, result, &.{ty_op.operand});
         },
         64 => {
-            const intrin_ret = try func.callIntrinsic(
+            const intrin_ret = try cg.callIntrinsic(
                 "__bitreversedi2",
                 &.{.u64_type},
                 Type.u64,
@@ -5846,64 +5844,64 @@ fn airBitReverse(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             const result = if (bits == 64)
                 intrin_ret
             else
-                try func.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr);
-            return func.finishAir(inst, result, &.{ty_op.operand});
+                try cg.binOp(intrin_ret, .{ .imm64 = 64 - bits }, ty, .shr);
+            return cg.finishAir(inst, result, &.{ty_op.operand});
         },
         128 => {
-            const result = try func.allocStack(ty);
+            const result = try cg.allocStack(ty);
 
-            try func.emitWValue(result);
-            const first_half = try func.load(operand, Type.u64, 8);
-            const intrin_ret_first = try func.callIntrinsic(
+            try cg.emitWValue(result);
+            const first_half = try cg.load(operand, Type.u64, 8);
+            const intrin_ret_first = try cg.callIntrinsic(
                 "__bitreversedi2",
                 &.{.u64_type},
                 Type.u64,
                 &.{first_half},
             );
-            try func.emitWValue(intrin_ret_first);
+            try cg.emitWValue(intrin_ret_first);
             if (bits < 128) {
-                try func.emitWValue(.{ .imm64 = 128 - bits });
-                try func.addTag(.i64_shr_u);
+                try cg.emitWValue(.{ .imm64 = 128 - bits });
+                try cg.addTag(.i64_shr_u);
             }
-            try func.emitWValue(result);
-            const second_half = try func.load(operand, Type.u64, 0);
-            const intrin_ret_second = try func.callIntrinsic(
+            try cg.emitWValue(result);
+            const second_half = try cg.load(operand, Type.u64, 0);
+            const intrin_ret_second = try cg.callIntrinsic(
                 "__bitreversedi2",
                 &.{.u64_type},
                 Type.u64,
                 &.{second_half},
             );
-            try func.emitWValue(intrin_ret_second);
+            try cg.emitWValue(intrin_ret_second);
             if (bits == 128) {
-                try func.store(.stack, .stack, Type.u64, result.offset() + 8);
-                try func.store(.stack, .stack, Type.u64, result.offset());
+                try cg.store(.stack, .stack, Type.u64, result.offset() + 8);
+                try cg.store(.stack, .stack, Type.u64, result.offset());
             } else {
-                var tmp = try func.allocLocal(Type.u64);
-                defer tmp.free(func);
-                try func.addLabel(.local_tee, tmp.local.value);
-                try func.emitWValue(.{ .imm64 = 128 - bits });
+                var tmp = try cg.allocLocal(Type.u64);
+                defer tmp.free(cg);
+                try cg.addLabel(.local_tee, tmp.local.value);
+                try cg.emitWValue(.{ .imm64 = 128 - bits });
                 if (ty.isSignedInt(zcu)) {
-                    try func.addTag(.i64_shr_s);
+                    try cg.addTag(.i64_shr_s);
                 } else {
-                    try func.addTag(.i64_shr_u);
+                    try cg.addTag(.i64_shr_u);
                 }
-                try func.store(.stack, .stack, Type.u64, result.offset() + 8);
-                try func.addLabel(.local_get, tmp.local.value);
-                try func.emitWValue(.{ .imm64 = bits - 64 });
-                try func.addTag(.i64_shl);
-                try func.addTag(.i64_or);
-                try func.store(.stack, .stack, Type.u64, result.offset());
+                try cg.store(.stack, .stack, Type.u64, result.offset() + 8);
+                try cg.addLabel(.local_get, tmp.local.value);
+                try cg.emitWValue(.{ .imm64 = bits - 64 });
+                try cg.addTag(.i64_shl);
+                try cg.addTag(.i64_or);
+                try cg.store(.stack, .stack, Type.u64, result.offset());
             }
-            return func.finishAir(inst, result, &.{ty_op.operand});
+            return cg.finishAir(inst, result, &.{ty_op.operand});
         },
         else => unreachable,
     }
 }
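
Aside: the sub-word case above relies on the identity that reversing the containing wasm word and then shifting right by the padding bits equals the narrow reverse. A minimal Zig sketch of that identity (the u20 width is illustrative):

    const std = @import("std");

    test "bit reverse of a narrow int via its 32-bit container" {
        const x: u20 = 0xABCDE;
        // reverse inside the 32-bit container, then drop the 12 padding bits,
        // mirroring the shr by `32 - bits` emitted above
        const via_word: u20 = @truncate(@bitReverse(@as(u32, x)) >> (32 - 20));
        try std.testing.expectEqual(@bitReverse(x), via_word);
    }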
 
-fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+fn airErrorName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
 
-    const operand = try func.resolveInst(un_op);
+    const operand = try cg.resolveInst(un_op);
     // First retrieve the symbol index of the error name table
     // that will be used to emit a relocation for the pointer
     // to the error name table.
@@ -5915,81 +5913,81 @@ fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     //
     // As the names are global and the slice elements are constant, we do not have
     // to make a copy of the ptr+value but can point towards them directly.
-    const pt = func.pt;
-    const error_table_symbol = try func.wasm.getErrorTableSymbol(pt);
+    const pt = cg.pt;
+    const error_table_symbol = try cg.wasm.getErrorTableSymbol(pt);
     const name_ty = Type.slice_const_u8_sentinel_0;
     const abi_size = name_ty.abiSize(pt.zcu);
 
     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
-    try func.emitWValue(error_name_value);
-    try func.emitWValue(operand);
-    switch (func.ptr_size) {
+    try cg.emitWValue(error_name_value);
+    try cg.emitWValue(operand);
+    switch (cg.ptr_size) {
         .wasm32 => {
-            try func.addImm32(@intCast(abi_size));
-            try func.addTag(.i32_mul);
-            try func.addTag(.i32_add);
+            try cg.addImm32(@intCast(abi_size));
+            try cg.addTag(.i32_mul);
+            try cg.addTag(.i32_add);
         },
         .wasm64 => {
-            try func.addImm64(abi_size);
-            try func.addTag(.i64_mul);
-            try func.addTag(.i64_add);
+            try cg.addImm64(abi_size);
+            try cg.addTag(.i64_mul);
+            try cg.addTag(.i64_add);
         },
     }
 
-    return func.finishAir(inst, .stack, &.{un_op});
+    return cg.finishAir(inst, .stack, &.{un_op});
 }
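
Aside: the mul/add sequence above is a plain array index into a table of slices: table_base + error_value * @sizeOf(slice). A sketch of the equivalent Zig (errorNamePtr and its signature are illustrative, not an API in this file):

    // Each table entry is a (ptr, len) slice, so one entry lives at
    // base + err * @sizeOf([:0]const u8), the multiply-add emitted above.
    fn errorNamePtr(table: [*]const [:0]const u8, err: u16) *const [:0]const u8 {
        return &table[err];
    }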
 
-fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
-    const slice_ptr = try func.resolveInst(ty_op.operand);
-    const result = try func.buildPointerOffset(slice_ptr, offset, .new);
-    return func.finishAir(inst, result, &.{ty_op.operand});
+fn airPtrSliceFieldPtr(cg: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const slice_ptr = try cg.resolveInst(ty_op.operand);
+    const result = try cg.buildPointerOffset(slice_ptr, offset, .new);
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// NOTE: Allocates space for the result on the virtual stack when the integer size exceeds 64 bits
-fn intZeroValue(func: *CodeGen, ty: Type) InnerError!WValue {
-    const zcu = func.wasm.base.comp.zcu.?;
+fn intZeroValue(cg: *CodeGen, ty: Type) InnerError!WValue {
+    const zcu = cg.wasm.base.comp.zcu.?;
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
+        return cg.fail("TODO: Implement intZeroValue for integer bitsize: {d}", .{int_info.bits});
     };
     switch (wasm_bits) {
         32 => return .{ .imm32 = 0 },
         64 => return .{ .imm64 = 0 },
         128 => {
-            const result = try func.allocStack(ty);
-            try func.store(result, .{ .imm64 = 0 }, Type.u64, 0);
-            try func.store(result, .{ .imm64 = 0 }, Type.u64, 8);
+            const result = try cg.allocStack(ty);
+            try cg.store(result, .{ .imm64 = 0 }, Type.u64, 0);
+            try cg.store(result, .{ .imm64 = 0 }, Type.u64, 8);
             return result;
         },
         else => unreachable,
     }
 }
 
-fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airAddSubWithOverflow(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const lhs = try func.resolveInst(extra.lhs);
-    const rhs = try func.resolveInst(extra.rhs);
-    const ty = func.typeOf(extra.lhs);
-    const pt = func.pt;
+    const lhs = try cg.resolveInst(extra.lhs);
+    const rhs = try cg.resolveInst(extra.rhs);
+    const ty = cg.typeOf(extra.lhs);
+    const pt = cg.pt;
     const zcu = pt.zcu;
 
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
+        return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
     const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 128) {
-        return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
+        return cg.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
     }
 
-    const op_result = try func.wrapBinOp(lhs, rhs, ty, op);
-    var op_tmp = try op_result.toLocal(func, ty);
-    defer op_tmp.free(func);
+    const op_result = try cg.wrapBinOp(lhs, rhs, ty, op);
+    var op_tmp = try op_result.toLocal(cg, ty);
+    defer op_tmp.free(cg);
 
     const cmp_op: std.math.CompareOperator = switch (op) {
         .add => .lt,
@@ -5997,40 +5995,40 @@ fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerErro
         else => unreachable,
     };
     const overflow_bit = if (is_signed) blk: {
-        const zero = try intZeroValue(func, ty);
-        const rhs_is_neg = try func.cmp(rhs, zero, ty, .lt);
-        const overflow_cmp = try func.cmp(op_tmp, lhs, ty, cmp_op);
-        break :blk try func.cmp(rhs_is_neg, overflow_cmp, Type.u1, .neq);
-    } else try func.cmp(op_tmp, lhs, ty, cmp_op);
-    var bit_tmp = try overflow_bit.toLocal(func, Type.u1);
-    defer bit_tmp.free(func);
-
-    const result = try func.allocStack(func.typeOfIndex(inst));
+        const zero = try intZeroValue(cg, ty);
+        const rhs_is_neg = try cg.cmp(rhs, zero, ty, .lt);
+        const overflow_cmp = try cg.cmp(op_tmp, lhs, ty, cmp_op);
+        break :blk try cg.cmp(rhs_is_neg, overflow_cmp, Type.u1, .neq);
+    } else try cg.cmp(op_tmp, lhs, ty, cmp_op);
+    var bit_tmp = try overflow_bit.toLocal(cg, Type.u1);
+    defer bit_tmp.free(cg);
+
+    const result = try cg.allocStack(cg.typeOfIndex(inst));
     const offset: u32 = @intCast(ty.abiSize(zcu));
-    try func.store(result, op_tmp, ty, 0);
-    try func.store(result, bit_tmp, Type.u1, offset);
+    try cg.store(result, op_tmp, ty, 0);
+    try cg.store(result, bit_tmp, Type.u1, offset);
 
-    return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+    return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
 }
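
Aside: the overflow bit computed above is the classic wrapped-compare test: an unsigned add overflowed iff the wrapped sum is less than an operand, and for signed operands that test flips when rhs is negative. A minimal sketch:

    const std = @import("std");

    test "add overflow via wrapped compare" {
        // unsigned: overflow iff the wrapped sum is less than lhs
        const ua: u8 = 200;
        try std.testing.expect(ua +% 100 < ua);

        // signed: the `sum < lhs` test is inverted when rhs is negative
        const lhs: i8 = 100;
        const rhs: i8 = 50;
        const overflowed = (rhs < 0) != (lhs +% rhs < lhs);
        try std.testing.expect(overflowed); // 100 + 50 does not fit in i8
    }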
 
-fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airShlWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const lhs = try func.resolveInst(extra.lhs);
-    const rhs = try func.resolveInst(extra.rhs);
-    const ty = func.typeOf(extra.lhs);
-    const rhs_ty = func.typeOf(extra.rhs);
+    const lhs = try cg.resolveInst(extra.lhs);
+    const rhs = try cg.resolveInst(extra.rhs);
+    const ty = cg.typeOf(extra.lhs);
+    const rhs_ty = cg.typeOf(extra.rhs);
 
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
+        return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
+        return cg.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
     };
 
     // Ensure rhs is coerced to lhs's type, as both must have the same WebAssembly type
@@ -6038,50 +6036,50 @@ fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     const rhs_wasm_bits = toWasmBits(rhs_ty.intInfo(zcu).bits).?;
     // If wasm_bits == 128, compiler-rt expects i32 for shift
     const rhs_final = if (wasm_bits != rhs_wasm_bits and wasm_bits == 64) blk: {
-        const rhs_casted = try func.intcast(rhs, rhs_ty, ty);
-        break :blk try rhs_casted.toLocal(func, ty);
+        const rhs_casted = try cg.intcast(rhs, rhs_ty, ty);
+        break :blk try rhs_casted.toLocal(cg, ty);
     } else rhs;
 
-    var shl = try (try func.wrapBinOp(lhs, rhs_final, ty, .shl)).toLocal(func, ty);
-    defer shl.free(func);
+    var shl = try (try cg.wrapBinOp(lhs, rhs_final, ty, .shl)).toLocal(cg, ty);
+    defer shl.free(cg);
 
     const overflow_bit = blk: {
-        const shr = try func.binOp(shl, rhs_final, ty, .shr);
-        break :blk try func.cmp(shr, lhs, ty, .neq);
+        const shr = try cg.binOp(shl, rhs_final, ty, .shr);
+        break :blk try cg.cmp(shr, lhs, ty, .neq);
     };
-    var overflow_local = try overflow_bit.toLocal(func, Type.u1);
-    defer overflow_local.free(func);
+    var overflow_local = try overflow_bit.toLocal(cg, Type.u1);
+    defer overflow_local.free(cg);
 
-    const result = try func.allocStack(func.typeOfIndex(inst));
+    const result = try cg.allocStack(cg.typeOfIndex(inst));
     const offset: u32 = @intCast(ty.abiSize(zcu));
-    try func.store(result, shl, ty, 0);
-    try func.store(result, overflow_local, Type.u1, offset);
+    try cg.store(result, shl, ty, 0);
+    try cg.store(result, overflow_local, Type.u1, offset);
 
-    return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+    return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
 }
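
Aside: the check above uses the shift round-trip property: a left shift lost bits exactly when shifting the wrapped result back right no longer reproduces lhs. Sketch:

    const std = @import("std");

    test "shl overflow via shift round-trip" {
        const lhs: u8 = 0b1100_0000;
        const shifted = lhs << 2; // plain `<<` discards the shifted-out bits
        try std.testing.expect((shifted >> 2) != lhs); // round trip fails, so it overflowed
    }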
 
-fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+fn airMulWithOverflow(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.Bin, ty_pl.payload).data;
 
-    const lhs = try func.resolveInst(extra.lhs);
-    const rhs = try func.resolveInst(extra.rhs);
-    const ty = func.typeOf(extra.lhs);
-    const pt = func.pt;
+    const lhs = try cg.resolveInst(extra.lhs);
+    const rhs = try cg.resolveInst(extra.rhs);
+    const ty = cg.typeOf(extra.lhs);
+    const pt = cg.pt;
     const zcu = pt.zcu;
 
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
+        return cg.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
     // This local stores whether the operation overflowed. As it's zero-initialized,
     // we only need to update it when an overflow (or underflow) occurs.
-    var overflow_bit = try func.ensureAllocLocal(Type.u1);
-    defer overflow_bit.free(func);
+    var overflow_bit = try cg.ensureAllocLocal(Type.u1);
+    defer overflow_bit.free(cg);
 
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
+        return cg.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
     };
 
     const zero: WValue = switch (wasm_bits) {
@@ -6093,248 +6091,248 @@ fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // for 32-bit integers we upcast to a 64-bit integer
     const mul = if (wasm_bits == 32) blk: {
         const new_ty = if (int_info.signedness == .signed) Type.i64 else Type.u64;
-        const lhs_upcast = try func.intcast(lhs, ty, new_ty);
-        const rhs_upcast = try func.intcast(rhs, ty, new_ty);
-        const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty);
-        const res = try (try func.trunc(bin_op, ty, new_ty)).toLocal(func, ty);
-        const res_upcast = try func.intcast(res, ty, new_ty);
-        _ = try func.cmp(res_upcast, bin_op, new_ty, .neq);
-        try func.addLabel(.local_set, overflow_bit.local.value);
+        const lhs_upcast = try cg.intcast(lhs, ty, new_ty);
+        const rhs_upcast = try cg.intcast(rhs, ty, new_ty);
+        const bin_op = try (try cg.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(cg, new_ty);
+        const res = try (try cg.trunc(bin_op, ty, new_ty)).toLocal(cg, ty);
+        const res_upcast = try cg.intcast(res, ty, new_ty);
+        _ = try cg.cmp(res_upcast, bin_op, new_ty, .neq);
+        try cg.addLabel(.local_set, overflow_bit.local.value);
         break :blk res;
     } else if (wasm_bits == 64) blk: {
         const new_ty = if (int_info.signedness == .signed) Type.i128 else Type.u128;
-        const lhs_upcast = try func.intcast(lhs, ty, new_ty);
-        const rhs_upcast = try func.intcast(rhs, ty, new_ty);
-        const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty);
-        const res = try (try func.trunc(bin_op, ty, new_ty)).toLocal(func, ty);
-        const res_upcast = try func.intcast(res, ty, new_ty);
-        _ = try func.cmp(res_upcast, bin_op, new_ty, .neq);
-        try func.addLabel(.local_set, overflow_bit.local.value);
+        const lhs_upcast = try cg.intcast(lhs, ty, new_ty);
+        const rhs_upcast = try cg.intcast(rhs, ty, new_ty);
+        const bin_op = try (try cg.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(cg, new_ty);
+        const res = try (try cg.trunc(bin_op, ty, new_ty)).toLocal(cg, ty);
+        const res_upcast = try cg.intcast(res, ty, new_ty);
+        _ = try cg.cmp(res_upcast, bin_op, new_ty, .neq);
+        try cg.addLabel(.local_set, overflow_bit.local.value);
         break :blk res;
     } else if (int_info.bits == 128 and int_info.signedness == .unsigned) blk: {
-        var lhs_lsb = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
-        defer lhs_lsb.free(func);
-        var lhs_msb = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
-        defer lhs_msb.free(func);
-        var rhs_lsb = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
-        defer rhs_lsb.free(func);
-        var rhs_msb = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64);
-        defer rhs_msb.free(func);
-
-        const cross_1 = try func.callIntrinsic(
+        var lhs_lsb = try (try cg.load(lhs, Type.u64, 0)).toLocal(cg, Type.u64);
+        defer lhs_lsb.free(cg);
+        var lhs_msb = try (try cg.load(lhs, Type.u64, 8)).toLocal(cg, Type.u64);
+        defer lhs_msb.free(cg);
+        var rhs_lsb = try (try cg.load(rhs, Type.u64, 0)).toLocal(cg, Type.u64);
+        defer rhs_lsb.free(cg);
+        var rhs_msb = try (try cg.load(rhs, Type.u64, 8)).toLocal(cg, Type.u64);
+        defer rhs_msb.free(cg);
+
+        const cross_1 = try cg.callIntrinsic(
             "__multi3",
             &[_]InternPool.Index{.i64_type} ** 4,
             Type.i128,
             &.{ lhs_msb, zero, rhs_lsb, zero },
         );
-        const cross_2 = try func.callIntrinsic(
+        const cross_2 = try cg.callIntrinsic(
             "__multi3",
             &[_]InternPool.Index{.i64_type} ** 4,
             Type.i128,
             &.{ rhs_msb, zero, lhs_lsb, zero },
         );
-        const mul_lsb = try func.callIntrinsic(
+        const mul_lsb = try cg.callIntrinsic(
             "__multi3",
             &[_]InternPool.Index{.i64_type} ** 4,
             Type.i128,
             &.{ rhs_lsb, zero, lhs_lsb, zero },
         );
 
-        const rhs_msb_not_zero = try func.cmp(rhs_msb, zero, Type.u64, .neq);
-        const lhs_msb_not_zero = try func.cmp(lhs_msb, zero, Type.u64, .neq);
-        const both_msb_not_zero = try func.binOp(rhs_msb_not_zero, lhs_msb_not_zero, Type.bool, .@"and");
-        const cross_1_msb = try func.load(cross_1, Type.u64, 8);
-        const cross_1_msb_not_zero = try func.cmp(cross_1_msb, zero, Type.u64, .neq);
-        const cond_1 = try func.binOp(both_msb_not_zero, cross_1_msb_not_zero, Type.bool, .@"or");
-        const cross_2_msb = try func.load(cross_2, Type.u64, 8);
-        const cross_2_msb_not_zero = try func.cmp(cross_2_msb, zero, Type.u64, .neq);
-        const cond_2 = try func.binOp(cond_1, cross_2_msb_not_zero, Type.bool, .@"or");
-
-        const cross_1_lsb = try func.load(cross_1, Type.u64, 0);
-        const cross_2_lsb = try func.load(cross_2, Type.u64, 0);
-        const cross_add = try func.binOp(cross_1_lsb, cross_2_lsb, Type.u64, .add);
-
-        var mul_lsb_msb = try (try func.load(mul_lsb, Type.u64, 8)).toLocal(func, Type.u64);
-        defer mul_lsb_msb.free(func);
-        var all_add = try (try func.binOp(cross_add, mul_lsb_msb, Type.u64, .add)).toLocal(func, Type.u64);
-        defer all_add.free(func);
-        const add_overflow = try func.cmp(all_add, mul_lsb_msb, Type.u64, .lt);
+        const rhs_msb_not_zero = try cg.cmp(rhs_msb, zero, Type.u64, .neq);
+        const lhs_msb_not_zero = try cg.cmp(lhs_msb, zero, Type.u64, .neq);
+        const both_msb_not_zero = try cg.binOp(rhs_msb_not_zero, lhs_msb_not_zero, Type.bool, .@"and");
+        const cross_1_msb = try cg.load(cross_1, Type.u64, 8);
+        const cross_1_msb_not_zero = try cg.cmp(cross_1_msb, zero, Type.u64, .neq);
+        const cond_1 = try cg.binOp(both_msb_not_zero, cross_1_msb_not_zero, Type.bool, .@"or");
+        const cross_2_msb = try cg.load(cross_2, Type.u64, 8);
+        const cross_2_msb_not_zero = try cg.cmp(cross_2_msb, zero, Type.u64, .neq);
+        const cond_2 = try cg.binOp(cond_1, cross_2_msb_not_zero, Type.bool, .@"or");
+
+        const cross_1_lsb = try cg.load(cross_1, Type.u64, 0);
+        const cross_2_lsb = try cg.load(cross_2, Type.u64, 0);
+        const cross_add = try cg.binOp(cross_1_lsb, cross_2_lsb, Type.u64, .add);
+
+        var mul_lsb_msb = try (try cg.load(mul_lsb, Type.u64, 8)).toLocal(cg, Type.u64);
+        defer mul_lsb_msb.free(cg);
+        var all_add = try (try cg.binOp(cross_add, mul_lsb_msb, Type.u64, .add)).toLocal(cg, Type.u64);
+        defer all_add.free(cg);
+        const add_overflow = try cg.cmp(all_add, mul_lsb_msb, Type.u64, .lt);
 
         // result for overflow bit
-        _ = try func.binOp(cond_2, add_overflow, Type.bool, .@"or");
-        try func.addLabel(.local_set, overflow_bit.local.value);
-
-        const tmp_result = try func.allocStack(Type.u128);
-        try func.emitWValue(tmp_result);
-        const mul_lsb_lsb = try func.load(mul_lsb, Type.u64, 0);
-        try func.store(.stack, mul_lsb_lsb, Type.u64, tmp_result.offset());
-        try func.store(tmp_result, all_add, Type.u64, 8);
+        _ = try cg.binOp(cond_2, add_overflow, Type.bool, .@"or");
+        try cg.addLabel(.local_set, overflow_bit.local.value);
+
+        const tmp_result = try cg.allocStack(Type.u128);
+        try cg.emitWValue(tmp_result);
+        const mul_lsb_lsb = try cg.load(mul_lsb, Type.u64, 0);
+        try cg.store(.stack, mul_lsb_lsb, Type.u64, tmp_result.offset());
+        try cg.store(tmp_result, all_add, Type.u64, 8);
         break :blk tmp_result;
     } else if (int_info.bits == 128 and int_info.signedness == .signed) blk: {
-        const overflow_ret = try func.allocStack(Type.i32);
-        const res = try func.callIntrinsic(
+        const overflow_ret = try cg.allocStack(Type.i32);
+        const res = try cg.callIntrinsic(
             "__muloti4",
             &[_]InternPool.Index{ .i128_type, .i128_type, .usize_type },
             Type.i128,
             &.{ lhs, rhs, overflow_ret },
         );
-        _ = try func.load(overflow_ret, Type.i32, 0);
-        try func.addLabel(.local_set, overflow_bit.local.value);
+        _ = try cg.load(overflow_ret, Type.i32, 0);
+        try cg.addLabel(.local_set, overflow_bit.local.value);
         break :blk res;
-    } else return func.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
-    var bin_op_local = try mul.toLocal(func, ty);
-    defer bin_op_local.free(func);
+    } else return cg.fail("TODO: @mulWithOverflow for {}", .{ty.fmt(pt)});
+    var bin_op_local = try mul.toLocal(cg, ty);
+    defer bin_op_local.free(cg);
 
-    const result = try func.allocStack(func.typeOfIndex(inst));
+    const result = try cg.allocStack(cg.typeOfIndex(inst));
     const offset: u32 = @intCast(ty.abiSize(zcu));
-    try func.store(result, bin_op_local, ty, 0);
-    try func.store(result, overflow_bit, Type.u1, offset);
+    try cg.store(result, bin_op_local, ty, 0);
+    try cg.store(result, overflow_bit, Type.u1, offset);
 
-    return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+    return cg.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
 }
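
Aside: the 32- and 64-bit paths above detect overflow by widening: multiply in the double-width type, truncate to the result type, re-widen, and compare. The wide product is exact, so any mismatch means the narrow result wrapped. Sketch of the 32-bit case:

    const std = @import("std");

    test "mul overflow via widening" {
        const lhs: u32 = 0x1234_5678;
        const rhs: u32 = 0x10;
        const wide = @as(u64, lhs) * @as(u64, rhs); // exact in u64
        const narrow: u32 = @truncate(wide);
        try std.testing.expect(@as(u64, narrow) != wide); // wrapped, so it overflowed
    }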
 
-fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airMaxMin(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .max or op == .min);
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ty = func.typeOfIndex(inst);
+    const ty = cg.typeOfIndex(inst);
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
+        return cg.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
 
     if (ty.abiSize(zcu) > 16) {
-        return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
+        return cg.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
     if (ty.zigTypeTag(zcu) == .float) {
         var fn_name_buf: [64]u8 = undefined;
-        const float_bits = ty.floatBits(func.target.*);
+        const float_bits = ty.floatBits(cg.target.*);
         const fn_name = std.fmt.bufPrint(&fn_name_buf, "{s}f{s}{s}", .{
             target_util.libcFloatPrefix(float_bits),
             @tagName(op),
             target_util.libcFloatSuffix(float_bits),
         }) catch unreachable;
-        const result = try func.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, ty, &.{ lhs, rhs });
-        try func.lowerToStack(result);
+        const result = try cg.callIntrinsic(fn_name, &.{ ty.ip_index, ty.ip_index }, ty, &.{ lhs, rhs });
+        try cg.lowerToStack(result);
     } else {
         // operands to select from
-        try func.lowerToStack(lhs);
-        try func.lowerToStack(rhs);
-        _ = try func.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
+        try cg.lowerToStack(lhs);
+        try cg.lowerToStack(rhs);
+        _ = try cg.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
 
         // based on the result of the comparison, return operand 0 or 1.
-        try func.addTag(.select);
+        try cg.addTag(.select);
     }
 
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
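
Aside: the integer path maps onto wasm `select`, which yields the first pushed operand when the condition is non-zero, so pushing lhs, rhs, then `lhs > rhs` selects the maximum. The Zig equivalent of what those three instructions compute (maxViaSelect is illustrative):

    fn maxViaSelect(lhs: i32, rhs: i32) i32 {
        // what `lowerToStack(lhs); lowerToStack(rhs); cmp(.gt); select` evaluates to
        return if (lhs > rhs) lhs else rhs;
    }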
 
-fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airMulAdd(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const bin_op = cg.air.extraData(Air.Bin, pl_op.payload).data;
 
-    const ty = func.typeOfIndex(inst);
+    const ty = cg.typeOfIndex(inst);
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: `@mulAdd` for vectors", .{});
+        return cg.fail("TODO: `@mulAdd` for vectors", .{});
     }
 
-    const addend = try func.resolveInst(pl_op.operand);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const addend = try cg.resolveInst(pl_op.operand);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
-    const result = if (ty.floatBits(func.target.*) == 16) fl_result: {
-        const rhs_ext = try func.fpext(rhs, ty, Type.f32);
-        const lhs_ext = try func.fpext(lhs, ty, Type.f32);
-        const addend_ext = try func.fpext(addend, ty, Type.f32);
+    const result = if (ty.floatBits(cg.target.*) == 16) fl_result: {
+        const rhs_ext = try cg.fpext(rhs, ty, Type.f32);
+        const lhs_ext = try cg.fpext(lhs, ty, Type.f32);
+        const addend_ext = try cg.fpext(addend, ty, Type.f32);
         // call to compiler-rt `fn fmaf(f32, f32, f32) f32`
-        const result = try func.callIntrinsic(
+        const result = try cg.callIntrinsic(
             "fmaf",
             &.{ .f32_type, .f32_type, .f32_type },
             Type.f32,
             &.{ rhs_ext, lhs_ext, addend_ext },
         );
-        break :fl_result try func.fptrunc(result, Type.f32, ty);
+        break :fl_result try cg.fptrunc(result, Type.f32, ty);
     } else result: {
-        const mul_result = try func.binOp(lhs, rhs, ty, .mul);
-        break :result try func.binOp(mul_result, addend, ty, .add);
+        const mul_result = try cg.binOp(lhs, rhs, ty, .mul);
+        break :result try cg.binOp(mul_result, addend, ty, .add);
     };
 
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs, pl_op.operand });
 }
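
Aside: core wasm has no scalar fused multiply-add instruction, so the f16 case widens to f32, fuses through compiler-rt's fmaf, and truncates back. A sketch of that widen-fuse-narrow shape:

    const std = @import("std");

    test "f16 mulAdd via f32 fma" {
        const a: f16 = 1.5;
        const b: f16 = 2.0;
        const c: f16 = 0.25;
        const wide = @mulAdd(f32, a, b, c); // f16 operands coerce to f32
        const fused: f16 = @floatCast(wide);
        try std.testing.expectEqual(@as(f16, 3.25), fused);
    }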
 
-fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airClz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const ty = func.typeOf(ty_op.operand);
+    const ty = cg.typeOf(ty_op.operand);
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: `@clz` for vectors", .{});
+        return cg.fail("TODO: `@clz` for vectors", .{});
     }
 
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
+        return cg.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
 
     switch (wasm_bits) {
         32 => {
-            try func.emitWValue(operand);
-            try func.addTag(.i32_clz);
+            try cg.emitWValue(operand);
+            try cg.addTag(.i32_clz);
         },
         64 => {
-            try func.emitWValue(operand);
-            try func.addTag(.i64_clz);
-            try func.addTag(.i32_wrap_i64);
+            try cg.emitWValue(operand);
+            try cg.addTag(.i64_clz);
+            try cg.addTag(.i32_wrap_i64);
         },
         128 => {
-            var msb = try (try func.load(operand, Type.u64, 8)).toLocal(func, Type.u64);
-            defer msb.free(func);
-
-            try func.emitWValue(msb);
-            try func.addTag(.i64_clz);
-            _ = try func.load(operand, Type.u64, 0);
-            try func.addTag(.i64_clz);
-            try func.emitWValue(.{ .imm64 = 64 });
-            try func.addTag(.i64_add);
-            _ = try func.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
-            try func.addTag(.select);
-            try func.addTag(.i32_wrap_i64);
+            var msb = try (try cg.load(operand, Type.u64, 8)).toLocal(cg, Type.u64);
+            defer msb.free(cg);
+
+            try cg.emitWValue(msb);
+            try cg.addTag(.i64_clz);
+            _ = try cg.load(operand, Type.u64, 0);
+            try cg.addTag(.i64_clz);
+            try cg.emitWValue(.{ .imm64 = 64 });
+            try cg.addTag(.i64_add);
+            _ = try cg.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
+            try cg.addTag(.select);
+            try cg.addTag(.i32_wrap_i64);
         },
         else => unreachable,
     }
 
     if (wasm_bits != int_info.bits) {
-        try func.emitWValue(.{ .imm32 = wasm_bits - int_info.bits });
-        try func.addTag(.i32_sub);
+        try cg.emitWValue(.{ .imm32 = wasm_bits - int_info.bits });
+        try cg.addTag(.i32_sub);
     }
 
-    return func.finishAir(inst, .stack, &.{ty_op.operand});
+    return cg.finishAir(inst, .stack, &.{ty_op.operand});
 }
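
Aside: the 128-bit path computes both candidate counts and selects on the high word: clz128(x) is clz64(hi) when hi is non-zero, else 64 + clz64(lo); the final subtraction then removes the padding for sub-word widths. Sketch (clz128 is illustrative):

    const std = @import("std");

    fn clz128(x: u128) u32 {
        const hi: u64 = @truncate(x >> 64);
        const lo: u64 = @truncate(x);
        return if (hi != 0) @clz(hi) else 64 + @as(u32, @clz(lo));
    }

    test "128-bit clz from two 64-bit counts" {
        try std.testing.expectEqual(@as(u32, 127), clz128(1));
        try std.testing.expectEqual(@as(u32, 0), clz128(@as(u128, 1) << 127));
    }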
 
-fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airCtz(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const ty = func.typeOf(ty_op.operand);
+    const ty = cg.typeOf(ty_op.operand);
 
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: `@ctz` for vectors", .{});
+        return cg.fail("TODO: `@ctz` for vectors", .{});
     }
 
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
+        return cg.fail("TODO: `@ctz` for integers with bitsize '{d}'", .{int_info.bits});
     };
 
     switch (wasm_bits) {
@@ -6342,110 +6340,110 @@ fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             if (wasm_bits != int_info.bits) {
                 const val: u32 = @as(u32, 1) << @as(u5, @intCast(int_info.bits));
                 // leave value on the stack
-                _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or");
-            } else try func.emitWValue(operand);
-            try func.addTag(.i32_ctz);
+                _ = try cg.binOp(operand, .{ .imm32 = val }, ty, .@"or");
+            } else try cg.emitWValue(operand);
+            try cg.addTag(.i32_ctz);
         },
         64 => {
             if (wasm_bits != int_info.bits) {
                 const val: u64 = @as(u64, 1) << @as(u6, @intCast(int_info.bits));
                 // leave value on the stack
-                _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or");
-            } else try func.emitWValue(operand);
-            try func.addTag(.i64_ctz);
-            try func.addTag(.i32_wrap_i64);
+                _ = try cg.binOp(operand, .{ .imm64 = val }, ty, .@"or");
+            } else try cg.emitWValue(operand);
+            try cg.addTag(.i64_ctz);
+            try cg.addTag(.i32_wrap_i64);
         },
         128 => {
-            var lsb = try (try func.load(operand, Type.u64, 0)).toLocal(func, Type.u64);
-            defer lsb.free(func);
+            var lsb = try (try cg.load(operand, Type.u64, 0)).toLocal(cg, Type.u64);
+            defer lsb.free(cg);
 
-            try func.emitWValue(lsb);
-            try func.addTag(.i64_ctz);
-            _ = try func.load(operand, Type.u64, 8);
+            try cg.emitWValue(lsb);
+            try cg.addTag(.i64_ctz);
+            _ = try cg.load(operand, Type.u64, 8);
             if (wasm_bits != int_info.bits) {
-                try func.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64)));
-                try func.addTag(.i64_or);
+                try cg.addImm64(@as(u64, 1) << @as(u6, @intCast(int_info.bits - 64)));
+                try cg.addTag(.i64_or);
             }
-            try func.addTag(.i64_ctz);
-            try func.addImm64(64);
+            try cg.addTag(.i64_ctz);
+            try cg.addImm64(64);
             if (wasm_bits != int_info.bits) {
-                try func.addTag(.i64_or);
+                try cg.addTag(.i64_or);
             } else {
-                try func.addTag(.i64_add);
+                try cg.addTag(.i64_add);
             }
-            _ = try func.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
-            try func.addTag(.select);
-            try func.addTag(.i32_wrap_i64);
+            _ = try cg.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
+            try cg.addTag(.select);
+            try cg.addTag(.i32_wrap_i64);
         },
         else => unreachable,
     }
 
-    return func.finishAir(inst, .stack, &.{ty_op.operand});
+    return cg.finishAir(inst, .stack, &.{ty_op.operand});
 }
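
Aside: for non-power-of-two widths the code ORs a guard bit into position `bits` before counting, so an all-zero operand counts exactly `bits` trailing zeros instead of the container width. Sketch for a u20 held in 32 bits:

    const std = @import("std");

    test "ctz of a narrow int via a guard bit" {
        const x: u32 = 0; // a u20 value held in a 32-bit container
        const guarded = x | (@as(u32, 1) << 20); // guard bit caps the count
        try std.testing.expectEqual(@as(u32, 20), @ctz(guarded));
    }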
 
-fn airDbgStmt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const dbg_stmt = func.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
-    try func.addInst(.{ .tag = .dbg_line, .data = .{
-        .payload = try func.addExtra(Mir.DbgLineColumn{
+fn airDbgStmt(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const dbg_stmt = cg.air.instructions.items(.data)[@intFromEnum(inst)].dbg_stmt;
+    try cg.addInst(.{ .tag = .dbg_line, .data = .{
+        .payload = try cg.addExtra(Mir.DbgLineColumn{
             .line = dbg_stmt.line,
             .column = dbg_stmt.column,
         }),
     } });
-    return func.finishAir(inst, .none, &.{});
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airDbgInlineBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
+fn airDbgInlineBlock(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
     // TODO
-    try func.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]));
+    try cg.lowerBlock(inst, ty_pl.ty.toType(), @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]));
 }
 
 fn airDbgVar(
-    func: *CodeGen,
+    cg: *CodeGen,
     inst: Air.Inst.Index,
     local_tag: link.File.Dwarf.WipNav.LocalTag,
     is_ptr: bool,
 ) InnerError!void {
     _ = is_ptr;
     _ = local_tag;
-    return func.finishAir(inst, .none, &.{});
+    return cg.finishAir(inst, .none, &.{});
 }
 
-fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const err_union = try func.resolveInst(pl_op.operand);
-    const extra = func.air.extraData(Air.Try, pl_op.payload);
-    const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
-    const err_union_ty = func.typeOf(pl_op.operand);
-    const result = try lowerTry(func, inst, err_union, body, err_union_ty, false);
-    return func.finishAir(inst, result, &.{pl_op.operand});
+fn airTry(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const err_union = try cg.resolveInst(pl_op.operand);
+    const extra = cg.air.extraData(Air.Try, pl_op.payload);
+    const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]);
+    const err_union_ty = cg.typeOf(pl_op.operand);
+    const result = try lowerTry(cg, inst, err_union, body, err_union_ty, false);
+    return cg.finishAir(inst, result, &.{pl_op.operand});
 }
 
-fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airTryPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
-    const err_union_ptr = try func.resolveInst(extra.data.ptr);
-    const body: []const Air.Inst.Index = @ptrCast(func.air.extra[extra.end..][0..extra.data.body_len]);
-    const err_union_ty = func.typeOf(extra.data.ptr).childType(zcu);
-    const result = try lowerTry(func, inst, err_union_ptr, body, err_union_ty, true);
-    return func.finishAir(inst, result, &.{extra.data.ptr});
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.TryPtr, ty_pl.payload);
+    const err_union_ptr = try cg.resolveInst(extra.data.ptr);
+    const body: []const Air.Inst.Index = @ptrCast(cg.air.extra[extra.end..][0..extra.data.body_len]);
+    const err_union_ty = cg.typeOf(extra.data.ptr).childType(zcu);
+    const result = try lowerTry(cg, inst, err_union_ptr, body, err_union_ty, true);
+    return cg.finishAir(inst, result, &.{extra.data.ptr});
 }
 
 fn lowerTry(
-    func: *CodeGen,
+    cg: *CodeGen,
     inst: Air.Inst.Index,
     err_union: WValue,
     body: []const Air.Inst.Index,
     err_union_ty: Type,
     operand_is_ptr: bool,
 ) InnerError!WValue {
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
     if (operand_is_ptr) {
-        return func.fail("TODO: lowerTry for pointers", .{});
+        return cg.fail("TODO: lowerTry for pointers", .{});
     }
 
     const pl_ty = err_union_ty.errorUnionPayload(zcu);
@@ -6453,29 +6451,29 @@ fn lowerTry(
 
     if (!err_union_ty.errorUnionSet(zcu).errorSetIsEmpty(zcu)) {
         // Block we can jump out of when error is not set
-        try func.startBlock(.block, std.wasm.block_empty);
+        try cg.startBlock(.block, std.wasm.block_empty);
 
         // check if the error tag is set for the error union.
-        try func.emitWValue(err_union);
+        try cg.emitWValue(err_union);
         if (pl_has_bits) {
             const err_offset: u32 = @intCast(errUnionErrorOffset(pl_ty, zcu));
-            try func.addMemArg(.i32_load16_u, .{
+            try cg.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
                 .alignment = @intCast(Type.anyerror.abiAlignment(zcu).toByteUnits().?),
             });
         }
-        try func.addTag(.i32_eqz);
-        try func.addLabel(.br_if, 0); // jump out of block when error is '0'
+        try cg.addTag(.i32_eqz);
+        try cg.addLabel(.br_if, 0); // jump out of block when error is '0'
 
-        const liveness = func.liveness.getCondBr(inst);
-        try func.branches.append(func.gpa, .{});
-        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
+        const liveness = cg.liveness.getCondBr(inst);
+        try cg.branches.append(cg.gpa, .{});
+        try cg.currentBranch().values.ensureUnusedCapacity(cg.gpa, liveness.else_deaths.len + liveness.then_deaths.len);
         defer {
-            var branch = func.branches.pop();
-            branch.deinit(func.gpa);
+            var branch = cg.branches.pop();
+            branch.deinit(cg.gpa);
         }
-        try func.genBody(body);
-        try func.endBlock();
+        try cg.genBody(body);
+        try cg.endBlock();
     }
 
     // if we reach here it means error was not set, and we want the payload
@@ -6484,38 +6482,38 @@ fn lowerTry(
     }
 
     const pl_offset: u32 = @intCast(errUnionPayloadOffset(pl_ty, zcu));
-    if (isByRef(pl_ty, pt, func.target)) {
-        return buildPointerOffset(func, err_union, pl_offset, .new);
+    if (isByRef(pl_ty, pt, cg.target)) {
+        return buildPointerOffset(cg, err_union, pl_offset, .new);
     }
-    const payload = try func.load(err_union, pl_ty, pl_offset);
-    return payload.toLocal(func, pl_ty);
+    const payload = try cg.load(err_union, pl_ty, pl_offset);
+    return payload.toLocal(cg, pl_ty);
 }
 
-fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airByteSwap(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const ty = func.typeOfIndex(inst);
-    const operand = try func.resolveInst(ty_op.operand);
+    const ty = cg.typeOfIndex(inst);
+    const operand = try cg.resolveInst(ty_op.operand);
 
     if (ty.zigTypeTag(zcu) == .vector) {
-        return func.fail("TODO: @byteSwap for vectors", .{});
+        return cg.fail("TODO: @byteSwap for vectors", .{});
     }
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits});
+        return cg.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits});
     };
 
     // swapping a single byte is a no-op
     if (int_info.bits == 8) {
-        return func.finishAir(inst, func.reuseOperand(ty_op.operand, operand), &.{ty_op.operand});
+        return cg.finishAir(inst, cg.reuseOperand(ty_op.operand, operand), &.{ty_op.operand});
     }
 
     const result = result: {
         switch (wasm_bits) {
             32 => {
-                const intrin_ret = try func.callIntrinsic(
+                const intrin_ret = try cg.callIntrinsic(
                     "__bswapsi2",
                     &.{.u32_type},
                     Type.u32,
@@ -6524,10 +6522,10 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 break :result if (int_info.bits == 32)
                     intrin_ret
                 else
-                    try func.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr);
+                    try cg.binOp(intrin_ret, .{ .imm32 = 32 - int_info.bits }, ty, .shr);
             },
             64 => {
-                const intrin_ret = try func.callIntrinsic(
+                const intrin_ret = try cg.callIntrinsic(
                     "__bswapdi2",
                     &.{.u64_type},
                     Type.u64,
@@ -6536,61 +6534,61 @@ fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                 break :result if (int_info.bits == 64)
                     intrin_ret
                 else
-                    try func.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr);
+                    try cg.binOp(intrin_ret, .{ .imm64 = 64 - int_info.bits }, ty, .shr);
             },
-            else => return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
+            else => return cg.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
         }
     };
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
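
Aside: as with bit reversal, a sub-word byte swap is done in the full container and then shifted right by the unused bits. Sketch for a u24 in a 32-bit word:

    const std = @import("std");

    test "byte swap of a narrow int via its 32-bit container" {
        const x: u24 = 0xAABBCC;
        // swap all four container bytes, then drop the zero byte,
        // mirroring the shr by `32 - int_info.bits` emitted above
        const via_word: u24 = @truncate(@byteSwap(@as(u32, x)) >> (32 - 24));
        try std.testing.expectEqual(@byteSwap(x), via_word);
    }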
 
-fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airDiv(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ty = func.typeOfIndex(inst);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = cg.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
-    const result = try func.binOp(lhs, rhs, ty, .div);
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result = try cg.binOp(lhs, rhs, ty, .div);
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airDivTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airDivTrunc(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ty = func.typeOfIndex(inst);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = cg.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
-    const div_result = try func.binOp(lhs, rhs, ty, .div);
+    const div_result = try cg.binOp(lhs, rhs, ty, .div);
 
     if (ty.isAnyFloat()) {
-        const trunc_result = try func.floatOp(.trunc, ty, &.{div_result});
-        return func.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs });
+        const trunc_result = try cg.floatOp(.trunc, ty, &.{div_result});
+        return cg.finishAir(inst, trunc_result, &.{ bin_op.lhs, bin_op.rhs });
     }
 
-    return func.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, div_result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airDivFloor(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty = func.typeOfIndex(inst);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = cg.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
     if (ty.isUnsignedInt(zcu)) {
-        _ = try func.binOp(lhs, rhs, ty, .div);
+        _ = try cg.binOp(lhs, rhs, ty, .div);
     } else if (ty.isSignedInt(zcu)) {
         const int_bits = ty.intInfo(zcu).bits;
         const wasm_bits = toWasmBits(int_bits) orelse {
-            return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+            return cg.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
         };
 
         if (wasm_bits > 64) {
-            return func.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+            return cg.fail("TODO: `@divFloor` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
         }
 
         const zero: WValue = switch (wasm_bits) {
@@ -6600,108 +6598,108 @@ fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         };
 
         // tee leaves the value on the stack and stores it in a local.
-        const quotient = try func.allocLocal(ty);
-        _ = try func.binOp(lhs, rhs, ty, .div);
-        try func.addLabel(.local_tee, quotient.local.value);
+        const quotient = try cg.allocLocal(ty);
+        _ = try cg.binOp(lhs, rhs, ty, .div);
+        try cg.addLabel(.local_tee, quotient.local.value);
 
         // select takes a 32-bit value as the condition, so in the 64-bit case we use eqz to
         // narrow the 64-bit condition value down to 32 bits.
         // eqz also inverts the condition (non-0 => 0, 0 => 1), so we push the adjusted and
         // non-adjusted quotients on the stack in the opposite order for 32 vs 64 bits.
         if (wasm_bits == 64) {
-            try func.emitWValue(quotient);
+            try cg.emitWValue(quotient);
         }
 
         // 0 if the signs of rhs_wasm and lhs_wasm are the same, 1 otherwise.
-        _ = try func.binOp(lhs, rhs, ty, .xor);
-        _ = try func.cmp(.stack, zero, ty, .lt);
+        _ = try cg.binOp(lhs, rhs, ty, .xor);
+        _ = try cg.cmp(.stack, zero, ty, .lt);
 
         switch (wasm_bits) {
             32 => {
-                try func.addTag(.i32_sub);
-                try func.emitWValue(quotient);
+                try cg.addTag(.i32_sub);
+                try cg.emitWValue(quotient);
             },
             64 => {
-                try func.addTag(.i64_extend_i32_u);
-                try func.addTag(.i64_sub);
+                try cg.addTag(.i64_extend_i32_u);
+                try cg.addTag(.i64_sub);
             },
             else => unreachable,
         }
 
-        _ = try func.binOp(lhs, rhs, ty, .rem);
+        _ = try cg.binOp(lhs, rhs, ty, .rem);
 
         if (wasm_bits == 64) {
-            try func.addTag(.i64_eqz);
+            try cg.addTag(.i64_eqz);
         }
 
-        try func.addTag(.select);
+        try cg.addTag(.select);
 
         // We need to zero the high bits because N-bit comparisons operate on the full 32 or
         // 64 bits and expect all but the lowest N bits to be 0.
         // TODO: Should we be zeroing the high bits here or should we be ignoring the high bits
         // when performing comparisons?
         if (int_bits != wasm_bits) {
-            _ = try func.wrapOperand(.stack, ty);
+            _ = try cg.wrapOperand(.stack, ty);
         }
     } else {
-        const float_bits = ty.floatBits(func.target.*);
+        const float_bits = ty.floatBits(cg.target.*);
         if (float_bits > 64) {
-            return func.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
+            return cg.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
         }
         const is_f16 = float_bits == 16;
 
-        const lhs_wasm = if (is_f16) try func.fpext(lhs, Type.f16, Type.f32) else lhs;
-        const rhs_wasm = if (is_f16) try func.fpext(rhs, Type.f16, Type.f32) else rhs;
+        const lhs_wasm = if (is_f16) try cg.fpext(lhs, Type.f16, Type.f32) else lhs;
+        const rhs_wasm = if (is_f16) try cg.fpext(rhs, Type.f16, Type.f32) else rhs;
 
-        try func.emitWValue(lhs_wasm);
-        try func.emitWValue(rhs_wasm);
+        try cg.emitWValue(lhs_wasm);
+        try cg.emitWValue(rhs_wasm);
 
         switch (float_bits) {
             16, 32 => {
-                try func.addTag(.f32_div);
-                try func.addTag(.f32_floor);
+                try cg.addTag(.f32_div);
+                try cg.addTag(.f32_floor);
             },
             64 => {
-                try func.addTag(.f64_div);
-                try func.addTag(.f64_floor);
+                try cg.addTag(.f64_div);
+                try cg.addTag(.f64_floor);
             },
             else => unreachable,
         }
 
         if (is_f16) {
-            _ = try func.fptrunc(.stack, Type.f32, Type.f16);
+            _ = try cg.fptrunc(.stack, Type.f32, Type.f16);
         }
     }
 
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
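
Aside: the signed sequence above encodes divFloor(a, b) = divTrunc(a, b) - 1 exactly when the operands differ in sign and the division is inexact; the xor supplies the sign test and the rem supplies the inexactness test. Sketch (divFloorViaTrunc is illustrative):

    const std = @import("std");

    fn divFloorViaTrunc(a: i32, b: i32) i32 {
        const q = @divTrunc(a, b);
        const signs_differ = (a ^ b) < 0; // the xor in the lowering above
        const inexact = @rem(a, b) != 0; // the rem in the lowering above
        return if (signs_differ and inexact) q - 1 else q;
    }

    test "floored division from truncated division" {
        try std.testing.expectEqual(@divFloor(@as(i32, -5), 2), divFloorViaTrunc(-5, 2));
        try std.testing.expectEqual(@divFloor(@as(i32, 5), 2), divFloorViaTrunc(5, 2));
    }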
 
-fn airRem(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airRem(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ty = func.typeOfIndex(inst);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = cg.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
-    const result = try func.binOp(lhs, rhs, ty, .rem);
+    const result = try cg.binOp(lhs, rhs, ty, .rem);
 
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Remainder after floor division, defined by:
 /// @divFloor(a, b) * b + @mod(a, b) = a
-fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airMod(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty = func.typeOfIndex(inst);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = cg.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
     const result = result: {
         if (ty.isUnsignedInt(zcu)) {
-            break :result try func.binOp(lhs, rhs, ty, .rem);
+            break :result try cg.binOp(lhs, rhs, ty, .rem);
         }
         if (ty.isSignedInt(zcu)) {
             // The wasm rem instruction gives the remainder after truncating division (rounding towards
@@ -6710,153 +6708,153 @@ fn airMod(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             // @mod(a, b) = @rem(@rem(a, b) + b, b)
             const int_bits = ty.intInfo(zcu).bits;
             const wasm_bits = toWasmBits(int_bits) orelse {
-                return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+                return cg.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
             };
 
             if (wasm_bits > 64) {
-                return func.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
+                return cg.fail("TODO: `@mod` for signed integers larger than 64 bits ({d} bits requested)", .{int_bits});
             }
 
-            _ = try func.binOp(lhs, rhs, ty, .rem);
-            _ = try func.binOp(.stack, rhs, ty, .add);
-            break :result try func.binOp(.stack, rhs, ty, .rem);
+            _ = try cg.binOp(lhs, rhs, ty, .rem);
+            _ = try cg.binOp(.stack, rhs, ty, .add);
+            break :result try cg.binOp(.stack, rhs, ty, .rem);
         }
         if (ty.isAnyFloat()) {
-            const rem = try func.binOp(lhs, rhs, ty, .rem);
-            const add = try func.binOp(rem, rhs, ty, .add);
-            break :result try func.binOp(add, rhs, ty, .rem);
+            const rem = try cg.binOp(lhs, rhs, ty, .rem);
+            const add = try cg.binOp(rem, rhs, ty, .add);
+            break :result try cg.binOp(add, rhs, ty, .rem);
         }
-        return func.fail("TODO: @mod for {}", .{ty.fmt(pt)});
+        return cg.fail("TODO: @mod for {}", .{ty.fmt(pt)});
     };
 
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
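
 // A small test sketching the identity used above, assuming ordinary Zig
 // semantics for @rem (truncated, rounds toward zero) and @mod (floored):
 test "floored mod from truncated rem" {
     const a: i32 = -7;
     const b: i32 = 3;
     // @rem(-7, 3) == -1; adding b and taking @rem once more yields the
     // floored result: @rem(-1 + 3, 3) == 2 == @mod(-7, 3).
     try std.testing.expectEqual(@as(i32, 2), @rem(@rem(a, b) + b, b));
     try std.testing.expectEqual(@mod(a, b), @rem(@rem(a, b) + b, b));
 }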
 
-fn airSatMul(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airSatMul(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty = func.typeOfIndex(inst);
+    const ty = cg.typeOfIndex(inst);
     const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+        return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
     };
 
     switch (wasm_bits) {
         32 => {
             const upcast_ty: Type = if (is_signed) Type.i64 else Type.u64;
-            const lhs_up = try func.intcast(lhs, ty, upcast_ty);
-            const rhs_up = try func.intcast(rhs, ty, upcast_ty);
-            var mul_res = try (try func.binOp(lhs_up, rhs_up, upcast_ty, .mul)).toLocal(func, upcast_ty);
-            defer mul_res.free(func);
+            const lhs_up = try cg.intcast(lhs, ty, upcast_ty);
+            const rhs_up = try cg.intcast(rhs, ty, upcast_ty);
+            var mul_res = try (try cg.binOp(lhs_up, rhs_up, upcast_ty, .mul)).toLocal(cg, upcast_ty);
+            defer mul_res.free(cg);
             if (is_signed) {
                 const imm_max: WValue = .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - (int_info.bits - 1)) };
-                try func.emitWValue(mul_res);
-                try func.emitWValue(imm_max);
-                _ = try func.cmp(mul_res, imm_max, upcast_ty, .lt);
-                try func.addTag(.select);
+                try cg.emitWValue(mul_res);
+                try cg.emitWValue(imm_max);
+                _ = try cg.cmp(mul_res, imm_max, upcast_ty, .lt);
+                try cg.addTag(.select);
 
-                var tmp = try func.allocLocal(upcast_ty);
-                defer tmp.free(func);
-                try func.addLabel(.local_set, tmp.local.value);
+                var tmp = try cg.allocLocal(upcast_ty);
+                defer tmp.free(cg);
+                try cg.addLabel(.local_set, tmp.local.value);
 
                 const imm_min: WValue = .{ .imm64 = ~@as(u64, 0) << @intCast(int_info.bits - 1) };
-                try func.emitWValue(tmp);
-                try func.emitWValue(imm_min);
-                _ = try func.cmp(tmp, imm_min, upcast_ty, .gt);
-                try func.addTag(.select);
+                try cg.emitWValue(tmp);
+                try cg.emitWValue(imm_min);
+                _ = try cg.cmp(tmp, imm_min, upcast_ty, .gt);
+                try cg.addTag(.select);
             } else {
                 const imm_max: WValue = .{ .imm64 = ~@as(u64, 0) >> @intCast(64 - int_info.bits) };
-                try func.emitWValue(mul_res);
-                try func.emitWValue(imm_max);
-                _ = try func.cmp(mul_res, imm_max, upcast_ty, .lt);
-                try func.addTag(.select);
+                try cg.emitWValue(mul_res);
+                try cg.emitWValue(imm_max);
+                _ = try cg.cmp(mul_res, imm_max, upcast_ty, .lt);
+                try cg.addTag(.select);
             }
-            try func.addTag(.i32_wrap_i64);
+            try cg.addTag(.i32_wrap_i64);
         },
         64 => {
             if (!(int_info.bits == 64 and int_info.signedness == .signed)) {
-                return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+                return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
             }
-            const overflow_ret = try func.allocStack(Type.i32);
-            _ = try func.callIntrinsic(
+            const overflow_ret = try cg.allocStack(Type.i32);
+            _ = try cg.callIntrinsic(
                 "__mulodi4",
                 &[_]InternPool.Index{ .i64_type, .i64_type, .usize_type },
                 Type.i64,
                 &.{ lhs, rhs, overflow_ret },
             );
-            const xor = try func.binOp(lhs, rhs, Type.i64, .xor);
-            const sign_v = try func.binOp(xor, .{ .imm64 = 63 }, Type.i64, .shr);
-            _ = try func.binOp(sign_v, .{ .imm64 = ~@as(u63, 0) }, Type.i64, .xor);
-            _ = try func.load(overflow_ret, Type.i32, 0);
-            try func.addTag(.i32_eqz);
-            try func.addTag(.select);
+            const xor = try cg.binOp(lhs, rhs, Type.i64, .xor);
+            const sign_v = try cg.binOp(xor, .{ .imm64 = 63 }, Type.i64, .shr);
+            _ = try cg.binOp(sign_v, .{ .imm64 = ~@as(u63, 0) }, Type.i64, .xor);
+            _ = try cg.load(overflow_ret, Type.i32, 0);
+            try cg.addTag(.i32_eqz);
+            try cg.addTag(.select);
         },
         128 => {
             if (!(int_info.bits == 128 and int_info.signedness == .signed)) {
-                return func.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
+                return cg.fail("TODO: mul_sat for {}", .{ty.fmt(pt)});
             }
-            const overflow_ret = try func.allocStack(Type.i32);
-            const ret = try func.callIntrinsic(
+            const overflow_ret = try cg.allocStack(Type.i32);
+            const ret = try cg.callIntrinsic(
                 "__muloti4",
                 &[_]InternPool.Index{ .i128_type, .i128_type, .usize_type },
                 Type.i128,
                 &.{ lhs, rhs, overflow_ret },
             );
-            try func.lowerToStack(ret);
-            const xor = try func.binOp(lhs, rhs, Type.i128, .xor);
-            const sign_v = try func.binOp(xor, .{ .imm32 = 127 }, Type.i128, .shr);
+            try cg.lowerToStack(ret);
+            const xor = try cg.binOp(lhs, rhs, Type.i128, .xor);
+            const sign_v = try cg.binOp(xor, .{ .imm32 = 127 }, Type.i128, .shr);
 
             // xor with ~@as(u127, 0), i.e. flip the low 127 bits across both words
-            try func.emitWValue(sign_v);
-            const lsb = try func.load(sign_v, Type.u64, 0);
-            _ = try func.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
-            try func.store(.stack, .stack, Type.u64, sign_v.offset());
-            try func.emitWValue(sign_v);
-            const msb = try func.load(sign_v, Type.u64, 8);
-            _ = try func.binOp(msb, .{ .imm64 = ~@as(u63, 0) }, Type.u64, .xor);
-            try func.store(.stack, .stack, Type.u64, sign_v.offset() + 8);
-
-            try func.lowerToStack(sign_v);
-            _ = try func.load(overflow_ret, Type.i32, 0);
-            try func.addTag(.i32_eqz);
-            try func.addTag(.select);
+            try cg.emitWValue(sign_v);
+            const lsb = try cg.load(sign_v, Type.u64, 0);
+            _ = try cg.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+            try cg.store(.stack, .stack, Type.u64, sign_v.offset());
+            try cg.emitWValue(sign_v);
+            const msb = try cg.load(sign_v, Type.u64, 8);
+            _ = try cg.binOp(msb, .{ .imm64 = ~@as(u63, 0) }, Type.u64, .xor);
+            try cg.store(.stack, .stack, Type.u64, sign_v.offset() + 8);
+
+            try cg.lowerToStack(sign_v);
+            _ = try cg.load(overflow_ret, Type.i32, 0);
+            try cg.addTag(.i32_eqz);
+            try cg.addTag(.select);
         },
         else => unreachable,
     }
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
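
 // A minimal sketch of the <=32-bit strategy above in plain Zig (assumed
 // equivalent semantics): widen to 64 bits so the product cannot overflow,
 // clamp against the narrow type's bounds, then narrow back down.
 fn mulSatI32Sketch(a: i32, b: i32) i32 {
     const wide = @as(i64, a) * @as(i64, b); // exact in 64 bits
     const hi: i64 = std.math.maxInt(i32);
     const lo: i64 = std.math.minInt(i32);
     return @intCast(@max(lo, @min(wide, hi)));
 }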
 
-fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airSatBinOp(cg: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty = func.typeOfIndex(inst);
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = cg.typeOfIndex(inst);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
 
     const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
 
     if (int_info.bits > 64) {
-        return func.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits});
+        return cg.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits});
     }
 
     if (is_signed) {
-        const result = try signedSat(func, lhs, rhs, ty, op);
-        return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+        const result = try signedSat(cg, lhs, rhs, ty, op);
+        return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
     }
 
     const wasm_bits = toWasmBits(int_info.bits).?;
-    var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty);
-    defer bin_result.free(func);
+    var bin_result = try (try cg.binOp(lhs, rhs, ty, op)).toLocal(cg, ty);
+    defer bin_result.free(cg);
     if (wasm_bits != int_info.bits and op == .add) {
         const val: u64 = @as(u64, @intCast((@as(u65, 1) << @as(u7, @intCast(int_info.bits))) - 1));
         const imm_val: WValue = switch (wasm_bits) {
@@ -6865,25 +6863,25 @@ fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
             else => unreachable,
         };
 
-        try func.emitWValue(bin_result);
-        try func.emitWValue(imm_val);
-        _ = try func.cmp(bin_result, imm_val, ty, .lt);
+        try cg.emitWValue(bin_result);
+        try cg.emitWValue(imm_val);
+        _ = try cg.cmp(bin_result, imm_val, ty, .lt);
     } else {
         switch (wasm_bits) {
-            32 => try func.addImm32(if (op == .add) std.math.maxInt(u32) else 0),
-            64 => try func.addImm64(if (op == .add) std.math.maxInt(u64) else 0),
+            32 => try cg.addImm32(if (op == .add) std.math.maxInt(u32) else 0),
+            64 => try cg.addImm64(if (op == .add) std.math.maxInt(u64) else 0),
             else => unreachable,
         }
-        try func.emitWValue(bin_result);
-        _ = try func.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
+        try cg.emitWValue(bin_result);
+        _ = try cg.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
     }
 
-    try func.addTag(.select);
-    return func.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
+    try cg.addTag(.select);
+    return cg.finishAir(inst, .stack, &.{ bin_op.lhs, bin_op.rhs });
 }
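
 // A minimal sketch of the unsigned full-width case above in plain Zig:
 // after a wrapping add, overflow occurred exactly when the result is
 // smaller than an operand, and the saturated answer is the type's maximum.
 fn addSatU32Sketch(a: u32, b: u32) u32 {
     const sum = a +% b;
     return if (sum < a) std.math.maxInt(u32) else sum;
 }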
 
-fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const pt = func.pt;
+fn signedSat(cg: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const int_info = ty.intInfo(zcu);
     const wasm_bits = toWasmBits(int_info.bits).?;
@@ -6903,92 +6901,92 @@ fn signedSat(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerEr
         else => unreachable,
     };
 
-    var bin_result = try (try func.binOp(lhs, rhs, ext_ty, op)).toLocal(func, ext_ty);
+    var bin_result = try (try cg.binOp(lhs, rhs, ext_ty, op)).toLocal(cg, ext_ty);
     if (!is_wasm_bits) {
-        defer bin_result.free(func); // not returned in this branch
-        try func.emitWValue(bin_result);
-        try func.emitWValue(max_wvalue);
-        _ = try func.cmp(bin_result, max_wvalue, ext_ty, .lt);
-        try func.addTag(.select);
-        try func.addLabel(.local_set, bin_result.local.value); // re-use local
-
-        try func.emitWValue(bin_result);
-        try func.emitWValue(min_wvalue);
-        _ = try func.cmp(bin_result, min_wvalue, ext_ty, .gt);
-        try func.addTag(.select);
-        try func.addLabel(.local_set, bin_result.local.value); // re-use local
-        return (try func.wrapOperand(bin_result, ty)).toLocal(func, ty);
+        defer bin_result.free(cg); // not returned in this branch
+        try cg.emitWValue(bin_result);
+        try cg.emitWValue(max_wvalue);
+        _ = try cg.cmp(bin_result, max_wvalue, ext_ty, .lt);
+        try cg.addTag(.select);
+        try cg.addLabel(.local_set, bin_result.local.value); // re-use local
+
+        try cg.emitWValue(bin_result);
+        try cg.emitWValue(min_wvalue);
+        _ = try cg.cmp(bin_result, min_wvalue, ext_ty, .gt);
+        try cg.addTag(.select);
+        try cg.addLabel(.local_set, bin_result.local.value); // re-use local
+        return (try cg.wrapOperand(bin_result, ty)).toLocal(cg, ty);
     } else {
         const zero: WValue = switch (wasm_bits) {
             32 => .{ .imm32 = 0 },
             64 => .{ .imm64 = 0 },
             else => unreachable,
         };
-        try func.emitWValue(max_wvalue);
-        try func.emitWValue(min_wvalue);
-        _ = try func.cmp(bin_result, zero, ty, .lt);
-        try func.addTag(.select);
-        try func.emitWValue(bin_result);
+        try cg.emitWValue(max_wvalue);
+        try cg.emitWValue(min_wvalue);
+        _ = try cg.cmp(bin_result, zero, ty, .lt);
+        try cg.addTag(.select);
+        try cg.emitWValue(bin_result);
         // leave on stack
-        const cmp_zero_result = try func.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
-        const cmp_bin_result = try func.cmp(bin_result, lhs, ty, .lt);
-        _ = try func.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
-        try func.addTag(.select);
-        try func.addLabel(.local_set, bin_result.local.value); // re-use local
+        const cmp_zero_result = try cg.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
+        const cmp_bin_result = try cg.cmp(bin_result, lhs, ty, .lt);
+        _ = try cg.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
+        try cg.addTag(.select);
+        try cg.addLabel(.local_set, bin_result.local.value); // re-use local
         return bin_result;
     }
 }
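
 // A minimal sketch of the full-width signed test above in plain Zig: for
 // `a +% b`, overflow happened exactly when (b < 0) != (sum < a), and the
 // wrapped sum's sign then selects the correct saturation bound.
 fn addSatI32Sketch(a: i32, b: i32) i32 {
     const sum = a +% b;
     const overflowed = (b < 0) != (sum < a);
     if (!overflowed) return sum;
     return if (sum < 0) std.math.maxInt(i32) else std.math.minInt(i32);
 }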
 
-fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+fn airShlSat(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const pt = func.pt;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty = func.typeOfIndex(inst);
+    const ty = cg.typeOfIndex(inst);
     const int_info = ty.intInfo(zcu);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 64) {
-        return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
+        return cg.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
     }
 
-    const lhs = try func.resolveInst(bin_op.lhs);
-    const rhs = try func.resolveInst(bin_op.rhs);
+    const lhs = try cg.resolveInst(bin_op.lhs);
+    const rhs = try cg.resolveInst(bin_op.rhs);
     const wasm_bits = toWasmBits(int_info.bits).?;
-    const result = try func.allocLocal(ty);
+    const result = try cg.allocLocal(ty);
 
     if (wasm_bits == int_info.bits) {
-        var shl = try (try func.binOp(lhs, rhs, ty, .shl)).toLocal(func, ty);
-        defer shl.free(func);
-        var shr = try (try func.binOp(shl, rhs, ty, .shr)).toLocal(func, ty);
-        defer shr.free(func);
+        var shl = try (try cg.binOp(lhs, rhs, ty, .shl)).toLocal(cg, ty);
+        defer shl.free(cg);
+        var shr = try (try cg.binOp(shl, rhs, ty, .shr)).toLocal(cg, ty);
+        defer shr.free(cg);
 
         switch (wasm_bits) {
             32 => blk: {
                 if (!is_signed) {
-                    try func.addImm32(std.math.maxInt(u32));
+                    try cg.addImm32(std.math.maxInt(u32));
                     break :blk;
                 }
-                try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
-                try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
-                _ = try func.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
-                try func.addTag(.select);
+                try cg.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
+                try cg.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
+                _ = try cg.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
+                try cg.addTag(.select);
             },
             64 => blk: {
                 if (!is_signed) {
-                    try func.addImm64(std.math.maxInt(u64));
+                    try cg.addImm64(std.math.maxInt(u64));
                     break :blk;
                 }
-                try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
-                try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
-                _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
-                try func.addTag(.select);
+                try cg.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
+                try cg.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
+                _ = try cg.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
+                try cg.addTag(.select);
             },
             else => unreachable,
         }
-        try func.emitWValue(shl);
-        _ = try func.cmp(lhs, shr, ty, .neq);
-        try func.addTag(.select);
-        try func.addLabel(.local_set, result.local.value);
+        try cg.emitWValue(shl);
+        _ = try cg.cmp(lhs, shr, ty, .neq);
+        try cg.addTag(.select);
+        try cg.addLabel(.local_set, result.local.value);
     } else {
         const shift_size = wasm_bits - int_info.bits;
         const shift_value: WValue = switch (wasm_bits) {
@@ -6998,50 +6996,50 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
         };
         const ext_ty = try pt.intType(int_info.signedness, wasm_bits);
 
-        var shl_res = try (try func.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(func, ext_ty);
-        defer shl_res.free(func);
-        var shl = try (try func.binOp(shl_res, rhs, ext_ty, .shl)).toLocal(func, ext_ty);
-        defer shl.free(func);
-        var shr = try (try func.binOp(shl, rhs, ext_ty, .shr)).toLocal(func, ext_ty);
-        defer shr.free(func);
+        var shl_res = try (try cg.binOp(lhs, shift_value, ext_ty, .shl)).toLocal(cg, ext_ty);
+        defer shl_res.free(cg);
+        var shl = try (try cg.binOp(shl_res, rhs, ext_ty, .shl)).toLocal(cg, ext_ty);
+        defer shl.free(cg);
+        var shr = try (try cg.binOp(shl, rhs, ext_ty, .shr)).toLocal(cg, ext_ty);
+        defer shr.free(cg);
 
         switch (wasm_bits) {
             32 => blk: {
                 if (!is_signed) {
-                    try func.addImm32(std.math.maxInt(u32));
+                    try cg.addImm32(std.math.maxInt(u32));
                     break :blk;
                 }
 
-                try func.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
-                try func.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
-                _ = try func.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt);
-                try func.addTag(.select);
+                try cg.addImm32(@bitCast(@as(i32, std.math.minInt(i32))));
+                try cg.addImm32(@bitCast(@as(i32, std.math.maxInt(i32))));
+                _ = try cg.cmp(shl_res, .{ .imm32 = 0 }, ext_ty, .lt);
+                try cg.addTag(.select);
             },
             64 => blk: {
                 if (!is_signed) {
-                    try func.addImm64(std.math.maxInt(u64));
+                    try cg.addImm64(std.math.maxInt(u64));
                     break :blk;
                 }
 
-                try func.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
-                try func.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
-                _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt);
-                try func.addTag(.select);
+                try cg.addImm64(@bitCast(@as(i64, std.math.minInt(i64))));
+                try cg.addImm64(@bitCast(@as(i64, std.math.maxInt(i64))));
+                _ = try cg.cmp(shl_res, .{ .imm64 = 0 }, ext_ty, .lt);
+                try cg.addTag(.select);
             },
             else => unreachable,
         }
-        try func.emitWValue(shl);
-        _ = try func.cmp(shl_res, shr, ext_ty, .neq);
-        try func.addTag(.select);
-        try func.addLabel(.local_set, result.local.value);
-        var shift_result = try func.binOp(result, shift_value, ext_ty, .shr);
+        try cg.emitWValue(shl);
+        _ = try cg.cmp(shl_res, shr, ext_ty, .neq);
+        try cg.addTag(.select);
+        try cg.addLabel(.local_set, result.local.value);
+        var shift_result = try cg.binOp(result, shift_value, ext_ty, .shr);
         if (is_signed) {
-            shift_result = try func.wrapOperand(shift_result, ty);
+            shift_result = try cg.wrapOperand(shift_result, ty);
         }
-        try func.addLabel(.local_set, result.local.value);
+        try cg.addLabel(.local_set, result.local.value);
     }
 
-    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
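
 // A minimal sketch of the round-trip check above for the full-width
 // unsigned case, in plain Zig: shift left, shift back, and compare; a
 // mismatch means bits were lost and the result must saturate.
 fn shlSatU32Sketch(a: u32, shift: u5) u32 {
     const shifted = a << shift; // shifted-out bits are discarded
     const back = shifted >> shift;
     return if (back != a) std.math.maxInt(u32) else shifted;
 }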
 
 /// Calls a compiler-rt intrinsic by creating an undefined symbol,
@@ -7051,27 +7049,27 @@ fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 /// passed as the first parameter.
 /// May leave the return value on the stack.
 fn callIntrinsic(
-    func: *CodeGen,
+    cg: *CodeGen,
     name: []const u8,
     param_types: []const InternPool.Index,
     return_type: Type,
     args: []const WValue,
 ) InnerError!WValue {
     assert(param_types.len == args.len);
-    const wasm = func.wasm;
-    const pt = func.pt;
+    const wasm = cg.wasm;
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const func_type_index = try genFunctype(wasm, .{ .wasm_watc = .{} }, param_types, return_type, pt, func.target);
+    const func_type_index = try genFunctype(wasm, .{ .wasm_watc = .{} }, param_types, return_type, pt, cg.target);
     const func_index = wasm.getOutputFunction(try wasm.internString(name), func_type_index);
 
     // Arguments are always passed according to the C ABI.
 
-    const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, func.target);
+    const want_sret_param = firstParamSRet(.{ .wasm_watc = .{} }, return_type, pt, cg.target);
     // if the return value is passed as the first parameter, allocate space
     // for it on the stack and emit that pointer as our first argument
     const sret = if (want_sret_param) blk: {
-        const sret_local = try func.allocStack(return_type);
-        try func.lowerToStack(sret_local);
+        const sret_local = try cg.allocStack(return_type);
+        try cg.lowerToStack(sret_local);
         break :blk sret_local;
     } else .none;
 
@@ -7079,16 +7077,16 @@ fn callIntrinsic(
     for (args, 0..) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
         assert(Type.fromInterned(param_types[arg_i]).hasRuntimeBitsIgnoreComptime(zcu));
-        try func.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg);
+        try cg.lowerArg(.{ .wasm_watc = .{} }, Type.fromInterned(param_types[arg_i]), arg);
     }
 
     // Actually call our intrinsic
-    try func.addLabel(.call_func, func_index);
+    try cg.addLabel(.call_func, func_index);
 
     if (!return_type.hasRuntimeBitsIgnoreComptime(zcu)) {
         return .none;
     } else if (return_type.isNoReturn(zcu)) {
-        try func.addTag(.@"unreachable");
+        try cg.addTag(.@"unreachable");
         return .none;
     } else if (want_sret_param) {
         return sret;
@@ -7097,31 +7095,31 @@ fn callIntrinsic(
     }
 }
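
 // For reference, a plain-Zig sketch of what the compiler-rt routine used
 // above (__mulodi4) is assumed to compute: a 64-bit multiply that reports
 // overflow through an out-pointer.
 fn mulodi4Sketch(a: i64, b: i64, overflow: *i32) i64 {
     const res = @mulWithOverflow(a, b);
     overflow.* = @intFromBool(res[1] != 0);
     return res[0];
 }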
 
-fn airTagName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const un_op = func.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
-    const operand = try func.resolveInst(un_op);
-    const enum_ty = func.typeOf(un_op);
+fn airTagName(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].un_op;
+    const operand = try cg.resolveInst(un_op);
+    const enum_ty = cg.typeOf(un_op);
 
-    const result_ptr = try func.allocStack(func.typeOfIndex(inst));
-    try func.lowerToStack(result_ptr);
-    try func.emitWValue(operand);
-    try func.addIpIndex(.call_tag_name, enum_ty.toIntern());
+    const result_ptr = try cg.allocStack(cg.typeOfIndex(inst));
+    try cg.lowerToStack(result_ptr);
+    try cg.emitWValue(operand);
+    try cg.addIpIndex(.call_tag_name, enum_ty.toIntern());
 
-    return func.finishAir(inst, result_ptr, &.{un_op});
+    return cg.finishAir(inst, result_ptr, &.{un_op});
 }
 
-fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airErrorSetHasValue(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
     const ip = &zcu.intern_pool;
-    const ty_op = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
+    const ty_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
 
-    const operand = try func.resolveInst(ty_op.operand);
+    const operand = try cg.resolveInst(ty_op.operand);
     const error_set_ty = ty_op.ty.toType();
-    const result = try func.allocLocal(Type.bool);
+    const result = try cg.allocLocal(Type.bool);
 
     const names = error_set_ty.errorSetNames(zcu);
-    var values = try std.ArrayList(u32).initCapacity(func.gpa, names.len);
+    var values = try std.ArrayList(u32).initCapacity(cg.gpa, names.len);
     defer values.deinit();
 
     var lowest: ?u32 = null;
@@ -7147,23 +7145,23 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     }
 
     // start block for 'true' branch
-    try func.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
     // start block for 'false' branch
-    try func.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
     // block for the jump table itself
-    try func.startBlock(.block, std.wasm.block_empty);
+    try cg.startBlock(.block, std.wasm.block_empty);
 
     // lower operand to determine jump table target
-    try func.emitWValue(operand);
-    try func.addImm32(lowest.?);
-    try func.addTag(.i32_sub);
+    try cg.emitWValue(operand);
+    try cg.addImm32(lowest.?);
+    try cg.addTag(.i32_sub);
 
     // Account for the default branch, so always add 1.
     const depth = @as(u32, @intCast(highest.? - lowest.? + 1));
     const jump_table: Mir.JumpTable = .{ .length = depth };
-    const table_extra_index = try func.addExtra(jump_table);
-    try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
-    try func.mir_extra.ensureUnusedCapacity(func.gpa, depth);
+    const table_extra_index = try cg.addExtra(jump_table);
+    try cg.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
+    try cg.mir_extra.ensureUnusedCapacity(cg.gpa, depth);
 
     var value: u32 = lowest.?;
     while (value <= highest.?) : (value += 1) {
@@ -7173,201 +7171,201 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             }
             break :blk 0;
         };
-        func.mir_extra.appendAssumeCapacity(idx);
+        cg.mir_extra.appendAssumeCapacity(idx);
     }
-    try func.endBlock();
+    try cg.endBlock();
 
     // 'false' branch (i.e. the error set does not have the value)
     // ensure we set the local to 0 in case the local was re-used.
-    try func.addImm32(0);
-    try func.addLabel(.local_set, result.local.value);
-    try func.addLabel(.br, 1);
-    try func.endBlock();
+    try cg.addImm32(0);
+    try cg.addLabel(.local_set, result.local.value);
+    try cg.addLabel(.br, 1);
+    try cg.endBlock();
 
     // 'true' branch
-    try func.addImm32(1);
-    try func.addLabel(.local_set, result.local.value);
-    try func.addLabel(.br, 0);
-    try func.endBlock();
+    try cg.addImm32(1);
+    try cg.addLabel(.local_set, result.local.value);
+    try cg.addLabel(.br, 0);
+    try cg.endBlock();
 
-    return func.finishAir(inst, result, &.{ty_op.operand});
+    return cg.finishAir(inst, result, &.{ty_op.operand});
 }
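
 // A minimal sketch of the dense-table idea above in plain Zig (the real
 // lowering emits a wasm br_table): map the operand into [lowest, highest]
 // and index a table in which only members of the set are marked true.
 fn hasValueSketch(comptime values: []const u32, x: u32) bool {
     const lowest = comptime std.mem.min(u32, values);
     const highest = comptime std.mem.max(u32, values);
     const table = comptime blk: {
         var t = [_]bool{false} ** (highest - lowest + 1);
         for (values) |v| t[v - lowest] = true;
         break :blk t;
     };
     if (x < lowest or x > highest) return false;
     return table[x - lowest];
 }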
 
-inline fn useAtomicFeature(func: *const CodeGen) bool {
-    return std.Target.wasm.featureSetHas(func.target.cpu.features, .atomics);
+inline fn useAtomicFeature(cg: *const CodeGen) bool {
+    return std.Target.wasm.featureSetHas(cg.target.cpu.features, .atomics);
 }
 
-fn airCmpxchg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airCmpxchg(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const ty_pl = func.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
-    const extra = func.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+    const ty_pl = cg.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+    const extra = cg.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
 
-    const ptr_ty = func.typeOf(extra.ptr);
+    const ptr_ty = cg.typeOf(extra.ptr);
     const ty = ptr_ty.childType(zcu);
-    const result_ty = func.typeOfIndex(inst);
+    const result_ty = cg.typeOfIndex(inst);
 
-    const ptr_operand = try func.resolveInst(extra.ptr);
-    const expected_val = try func.resolveInst(extra.expected_value);
-    const new_val = try func.resolveInst(extra.new_value);
+    const ptr_operand = try cg.resolveInst(extra.ptr);
+    const expected_val = try cg.resolveInst(extra.expected_value);
+    const new_val = try cg.resolveInst(extra.new_value);
 
-    const cmp_result = try func.allocLocal(Type.bool);
+    const cmp_result = try cg.allocLocal(Type.bool);
 
-    const ptr_val = if (func.useAtomicFeature()) val: {
-        const val_local = try func.allocLocal(ty);
-        try func.emitWValue(ptr_operand);
-        try func.lowerToStack(expected_val);
-        try func.lowerToStack(new_val);
-        try func.addAtomicMemArg(switch (ty.abiSize(zcu)) {
+    const ptr_val = if (cg.useAtomicFeature()) val: {
+        const val_local = try cg.allocLocal(ty);
+        try cg.emitWValue(ptr_operand);
+        try cg.lowerToStack(expected_val);
+        try cg.lowerToStack(new_val);
+        try cg.addAtomicMemArg(switch (ty.abiSize(zcu)) {
             1 => .i32_atomic_rmw8_cmpxchg_u,
             2 => .i32_atomic_rmw16_cmpxchg_u,
             4 => .i32_atomic_rmw_cmpxchg,
             8 => .i64_atomic_rmw_cmpxchg,
-            else => |size| return func.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
+            else => |size| return cg.fail("TODO: implement `@cmpxchg` for types with abi size '{d}'", .{size}),
         }, .{
             .offset = ptr_operand.offset(),
             .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         });
-        try func.addLabel(.local_tee, val_local.local.value);
-        _ = try func.cmp(.stack, expected_val, ty, .eq);
-        try func.addLabel(.local_set, cmp_result.local.value);
+        try cg.addLabel(.local_tee, val_local.local.value);
+        _ = try cg.cmp(.stack, expected_val, ty, .eq);
+        try cg.addLabel(.local_set, cmp_result.local.value);
         break :val val_local;
     } else val: {
         if (ty.abiSize(zcu) > 8) {
-            return func.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
+            return cg.fail("TODO: Implement `@cmpxchg` for types larger than abi size of 8 bytes", .{});
         }
-        const ptr_val = try WValue.toLocal(try func.load(ptr_operand, ty, 0), func, ty);
+        const ptr_val = try WValue.toLocal(try cg.load(ptr_operand, ty, 0), cg, ty);
 
-        try func.lowerToStack(ptr_operand);
-        try func.lowerToStack(new_val);
-        try func.emitWValue(ptr_val);
-        _ = try func.cmp(ptr_val, expected_val, ty, .eq);
-        try func.addLabel(.local_tee, cmp_result.local.value);
-        try func.addTag(.select);
-        try func.store(.stack, .stack, ty, 0);
+        try cg.lowerToStack(ptr_operand);
+        try cg.lowerToStack(new_val);
+        try cg.emitWValue(ptr_val);
+        _ = try cg.cmp(ptr_val, expected_val, ty, .eq);
+        try cg.addLabel(.local_tee, cmp_result.local.value);
+        try cg.addTag(.select);
+        try cg.store(.stack, .stack, ty, 0);
 
         break :val ptr_val;
     };
 
-    const result = if (isByRef(result_ty, pt, func.target)) val: {
-        try func.emitWValue(cmp_result);
-        try func.addImm32(~@as(u32, 0));
-        try func.addTag(.i32_xor);
-        try func.addImm32(1);
-        try func.addTag(.i32_and);
-        const and_result = try WValue.toLocal(.stack, func, Type.bool);
-        const result_ptr = try func.allocStack(result_ty);
-        try func.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu))));
-        try func.store(result_ptr, ptr_val, ty, 0);
+    const result = if (isByRef(result_ty, pt, cg.target)) val: {
+        try cg.emitWValue(cmp_result);
+        try cg.addImm32(~@as(u32, 0));
+        try cg.addTag(.i32_xor);
+        try cg.addImm32(1);
+        try cg.addTag(.i32_and);
+        const and_result = try WValue.toLocal(.stack, cg, Type.bool);
+        const result_ptr = try cg.allocStack(result_ty);
+        try cg.store(result_ptr, and_result, Type.bool, @as(u32, @intCast(ty.abiSize(zcu))));
+        try cg.store(result_ptr, ptr_val, ty, 0);
         break :val result_ptr;
     } else val: {
-        try func.addImm32(0);
-        try func.emitWValue(ptr_val);
-        try func.emitWValue(cmp_result);
-        try func.addTag(.select);
+        try cg.addImm32(0);
+        try cg.emitWValue(ptr_val);
+        try cg.emitWValue(cmp_result);
+        try cg.addTag(.select);
         break :val .stack;
     };
 
-    return func.finishAir(inst, result, &.{ extra.ptr, extra.expected_value, extra.new_value });
+    return cg.finishAir(inst, result, &.{ extra.ptr, extra.expected_value, extra.new_value });
 }
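
 // A minimal sketch of the non-atomic fallback above in plain Zig: without
 // the atomics feature the module is effectively single-threaded, so cmpxchg
 // reduces to a load, a compare, and a conditional store.
 fn cmpxchgSketch(comptime T: type, ptr: *T, expected: T, new: T) ?T {
     const old = ptr.*;
     if (old != expected) return old; // failure: report the observed value
     ptr.* = new;
     return null; // success, matching @cmpxchgStrong's result convention
 }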
 
-fn airAtomicLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
-    const atomic_load = func.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
-    const ptr = try func.resolveInst(atomic_load.ptr);
-    const ty = func.typeOfIndex(inst);
+fn airAtomicLoad(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
+    const atomic_load = cg.air.instructions.items(.data)[@intFromEnum(inst)].atomic_load;
+    const ptr = try cg.resolveInst(atomic_load.ptr);
+    const ty = cg.typeOfIndex(inst);
 
-    if (func.useAtomicFeature()) {
+    if (cg.useAtomicFeature()) {
         const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(pt.zcu)) {
             1 => .i32_atomic_load8_u,
             2 => .i32_atomic_load16_u,
             4 => .i32_atomic_load,
             8 => .i64_atomic_load,
-            else => |size| return func.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
+            else => |size| return cg.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
         };
-        try func.emitWValue(ptr);
-        try func.addAtomicMemArg(tag, .{
+        try cg.emitWValue(ptr);
+        try cg.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
             .alignment = @intCast(ty.abiAlignment(pt.zcu).toByteUnits().?),
         });
     } else {
-        _ = try func.load(ptr, ty, 0);
+        _ = try cg.load(ptr, ty, 0);
     }
 
-    return func.finishAir(inst, .stack, &.{atomic_load.ptr});
+    return cg.finishAir(inst, .stack, &.{atomic_load.ptr});
 }
 
-fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airAtomicRmw(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const pl_op = func.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
-    const extra = func.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+    const pl_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+    const extra = cg.air.extraData(Air.AtomicRmw, pl_op.payload).data;
 
-    const ptr = try func.resolveInst(pl_op.operand);
-    const operand = try func.resolveInst(extra.operand);
-    const ty = func.typeOfIndex(inst);
+    const ptr = try cg.resolveInst(pl_op.operand);
+    const operand = try cg.resolveInst(extra.operand);
+    const ty = cg.typeOfIndex(inst);
     const op: std.builtin.AtomicRmwOp = extra.op();
 
-    if (func.useAtomicFeature()) {
+    if (cg.useAtomicFeature()) {
         switch (op) {
             .Max,
             .Min,
             .Nand,
             => {
-                const tmp = try func.load(ptr, ty, 0);
-                const value = try tmp.toLocal(func, ty);
+                const tmp = try cg.load(ptr, ty, 0);
+                const value = try tmp.toLocal(cg, ty);
 
                 // create a loop to cmpxchg the new value
-                try func.startBlock(.loop, std.wasm.block_empty);
+                try cg.startBlock(.loop, std.wasm.block_empty);
 
-                try func.emitWValue(ptr);
-                try func.emitWValue(value);
+                try cg.emitWValue(ptr);
+                try cg.emitWValue(value);
                 if (op == .Nand) {
                     const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
 
-                    const and_res = try func.binOp(value, operand, ty, .@"and");
+                    const and_res = try cg.binOp(value, operand, ty, .@"and");
                     if (wasm_bits == 32)
-                        try func.addImm32(~@as(u32, 0))
+                        try cg.addImm32(~@as(u32, 0))
                     else if (wasm_bits == 64)
-                        try func.addImm64(~@as(u64, 0))
+                        try cg.addImm64(~@as(u64, 0))
                     else
-                        return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
-                    _ = try func.binOp(and_res, .stack, ty, .xor);
+                        return cg.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
+                    _ = try cg.binOp(and_res, .stack, ty, .xor);
                 } else {
-                    try func.emitWValue(value);
-                    try func.emitWValue(operand);
-                    _ = try func.cmp(value, operand, ty, if (op == .Max) .gt else .lt);
-                    try func.addTag(.select);
+                    try cg.emitWValue(value);
+                    try cg.emitWValue(operand);
+                    _ = try cg.cmp(value, operand, ty, if (op == .Max) .gt else .lt);
+                    try cg.addTag(.select);
                 }
-                try func.addAtomicMemArg(
+                try cg.addAtomicMemArg(
                     switch (ty.abiSize(zcu)) {
                         1 => .i32_atomic_rmw8_cmpxchg_u,
                         2 => .i32_atomic_rmw16_cmpxchg_u,
                         4 => .i32_atomic_rmw_cmpxchg,
                         8 => .i64_atomic_rmw_cmpxchg,
-                        else => return func.fail("TODO: implement `@atomicRmw` with operation `{s}` for types larger than 64 bits", .{@tagName(op)}),
+                        else => return cg.fail("TODO: implement `@atomicRmw` with operation `{s}` for types larger than 64 bits", .{@tagName(op)}),
                     },
                     .{
                         .offset = ptr.offset(),
                         .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
                     },
                 );
-                const select_res = try func.allocLocal(ty);
-                try func.addLabel(.local_tee, select_res.local.value);
-                _ = try func.cmp(.stack, value, ty, .neq); // leave on stack so we can use it for br_if
+                const select_res = try cg.allocLocal(ty);
+                try cg.addLabel(.local_tee, select_res.local.value);
+                _ = try cg.cmp(.stack, value, ty, .neq); // leave on stack so we can use it for br_if
 
-                try func.emitWValue(select_res);
-                try func.addLabel(.local_set, value.local.value);
+                try cg.emitWValue(select_res);
+                try cg.addLabel(.local_set, value.local.value);
 
-                try func.addLabel(.br_if, 0);
-                try func.endBlock();
-                return func.finishAir(inst, value, &.{ pl_op.operand, extra.operand });
+                try cg.addLabel(.br_if, 0);
+                try cg.endBlock();
+                return cg.finishAir(inst, value, &.{ pl_op.operand, extra.operand });
             },
 
             // the other operations have dedicated wasm instructions.
             else => {
-                try func.emitWValue(ptr);
-                try func.emitWValue(operand);
+                try cg.emitWValue(ptr);
+                try cg.emitWValue(operand);
                 const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
                     1 => switch (op) {
                         .Xchg => .i32_atomic_rmw8_xchg_u,
@@ -7405,22 +7403,22 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                         .Xor => .i64_atomic_rmw_xor,
                         else => unreachable,
                     },
-                    else => |size| return func.fail("TODO: Implement `@atomicRmw` for types with abi size {d}", .{size}),
+                    else => |size| return cg.fail("TODO: Implement `@atomicRmw` for types with abi size {d}", .{size}),
                 };
-                try func.addAtomicMemArg(tag, .{
+                try cg.addAtomicMemArg(tag, .{
                     .offset = ptr.offset(),
                     .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
                 });
-                return func.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand });
+                return cg.finishAir(inst, .stack, &.{ pl_op.operand, extra.operand });
             },
         }
     } else {
-        const loaded = try func.load(ptr, ty, 0);
-        const result = try loaded.toLocal(func, ty);
+        const loaded = try cg.load(ptr, ty, 0);
+        const result = try loaded.toLocal(cg, ty);
 
         switch (op) {
             .Xchg => {
-                try func.store(ptr, operand, ty, 0);
+                try cg.store(ptr, operand, ty, 0);
             },
             .Add,
             .Sub,
@@ -7428,8 +7426,8 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
             .Or,
             .Xor,
             => {
-                try func.emitWValue(ptr);
-                _ = try func.binOp(result, operand, ty, switch (op) {
+                try cg.emitWValue(ptr);
+                _ = try cg.binOp(result, operand, ty, switch (op) {
                     .Add => .add,
                     .Sub => .sub,
                     .And => .@"and",
@@ -7438,87 +7436,87 @@ fn airAtomicRmw(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
                     else => unreachable,
                 });
                 if (ty.isInt(zcu) and (op == .Add or op == .Sub)) {
-                    _ = try func.wrapOperand(.stack, ty);
+                    _ = try cg.wrapOperand(.stack, ty);
                 }
-                try func.store(.stack, .stack, ty, ptr.offset());
+                try cg.store(.stack, .stack, ty, ptr.offset());
             },
             .Max,
             .Min,
             => {
-                try func.emitWValue(ptr);
-                try func.emitWValue(result);
-                try func.emitWValue(operand);
-                _ = try func.cmp(result, operand, ty, if (op == .Max) .gt else .lt);
-                try func.addTag(.select);
-                try func.store(.stack, .stack, ty, ptr.offset());
+                try cg.emitWValue(ptr);
+                try cg.emitWValue(result);
+                try cg.emitWValue(operand);
+                _ = try cg.cmp(result, operand, ty, if (op == .Max) .gt else .lt);
+                try cg.addTag(.select);
+                try cg.store(.stack, .stack, ty, ptr.offset());
             },
             .Nand => {
                 const wasm_bits = toWasmBits(@intCast(ty.bitSize(zcu))).?;
 
-                try func.emitWValue(ptr);
-                const and_res = try func.binOp(result, operand, ty, .@"and");
+                try cg.emitWValue(ptr);
+                const and_res = try cg.binOp(result, operand, ty, .@"and");
                 if (wasm_bits == 32)
-                    try func.addImm32(~@as(u32, 0))
+                    try cg.addImm32(~@as(u32, 0))
                 else if (wasm_bits == 64)
-                    try func.addImm64(~@as(u64, 0))
+                    try cg.addImm64(~@as(u64, 0))
                 else
-                    return func.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
-                _ = try func.binOp(and_res, .stack, ty, .xor);
-                try func.store(.stack, .stack, ty, ptr.offset());
+                    return cg.fail("TODO: `@atomicRmw` with operator `Nand` for types larger than 64 bits", .{});
+                _ = try cg.binOp(and_res, .stack, ty, .xor);
+                try cg.store(.stack, .stack, ty, ptr.offset());
             },
         }
 
-        return func.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
+        return cg.finishAir(inst, result, &.{ pl_op.operand, extra.operand });
     }
 }
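
 // A minimal sketch of the Nand lowering above in plain Zig: wasm has no
 // nand instruction, so ~(a & b) is computed as (a & b) ^ ~0.
 fn nandSketch(a: u32, b: u32) u32 {
     return (a & b) ^ ~@as(u32, 0);
 }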
 
-fn airAtomicStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    const pt = func.pt;
+fn airAtomicStore(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    const bin_op = func.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
+    const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
 
-    const ptr = try func.resolveInst(bin_op.lhs);
-    const operand = try func.resolveInst(bin_op.rhs);
-    const ptr_ty = func.typeOf(bin_op.lhs);
+    const ptr = try cg.resolveInst(bin_op.lhs);
+    const operand = try cg.resolveInst(bin_op.rhs);
+    const ptr_ty = cg.typeOf(bin_op.lhs);
     const ty = ptr_ty.childType(zcu);
 
-    if (func.useAtomicFeature()) {
+    if (cg.useAtomicFeature()) {
         const tag: std.wasm.AtomicsOpcode = switch (ty.abiSize(zcu)) {
             1 => .i32_atomic_store8,
             2 => .i32_atomic_store16,
             4 => .i32_atomic_store,
             8 => .i64_atomic_store,
-            else => |size| return func.fail("TODO: @atomicLoad for types with abi size {d}", .{size}),
+            else => |size| return cg.fail("TODO: @atomicStore for types with abi size {d}", .{size}),
         };
-        try func.emitWValue(ptr);
-        try func.lowerToStack(operand);
-        try func.addAtomicMemArg(tag, .{
+        try cg.emitWValue(ptr);
+        try cg.lowerToStack(operand);
+        try cg.addAtomicMemArg(tag, .{
             .offset = ptr.offset(),
             .alignment = @intCast(ty.abiAlignment(zcu).toByteUnits().?),
         });
     } else {
-        try func.store(ptr, operand, ty, 0);
+        try cg.store(ptr, operand, ty, 0);
     }
 
-    return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    return cg.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
-    if (func.initial_stack_value == .none) {
-        try func.initializeStack();
+fn airFrameAddress(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    if (cg.initial_stack_value == .none) {
+        try cg.initializeStack();
     }
-    try func.emitWValue(func.bottom_stack_value);
-    return func.finishAir(inst, .stack, &.{});
+    try cg.emitWValue(cg.bottom_stack_value);
+    return cg.finishAir(inst, .stack, &.{});
 }
 
-fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
-    const pt = func.pt;
+fn typeOf(cg: *CodeGen, inst: Air.Inst.Ref) Type {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    return func.air.typeOf(inst, &zcu.intern_pool);
+    return cg.air.typeOf(inst, &zcu.intern_pool);
 }
 
-fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
-    const pt = func.pt;
+fn typeOfIndex(cg: *CodeGen, inst: Air.Inst.Index) Type {
+    const pt = cg.pt;
     const zcu = pt.zcu;
-    return func.air.typeOfIndex(inst, &zcu.intern_pool);
+    return cg.air.typeOfIndex(inst, &zcu.intern_pool);
 }