Commit 0aa23fe8b7

Luuk de Gram <luuk@degram.dev>
2022-10-16 15:48:08
wasm: rename 'self' to a more explanatory name
`Self` isn't a very descriptive name for this type. This commit renames the type to `CodeGen` and the parameter to `func`, since we're generating code for a function. With this change, the backend's coding style matches that of the self-hosted Wasm linker.
1 parent ff1cab0
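For context: in Zig, each source file is itself a struct type, and `@This()` returns that enclosing type, so the rename swaps the conventional `Self` alias for a descriptive one. A minimal sketch of the idiom (illustrative only, not code from this commit; `bumpLocalIndex` is a hypothetical method, while `gpa` and `local_index` mirror fields seen in the diff):

    const std = @import("std");

    // The file is itself a struct type; `CodeGen` now names it
    // (previously: `const Self = @This();`).
    const CodeGen = @This();

    gpa: std.mem.Allocator,
    local_index: u32 = 0,

    // Methods take `func: *CodeGen` rather than `self: *Self`,
    // so call sites read as "the function being generated".
    fn bumpLocalIndex(func: *CodeGen) void {
        func.local_index += 1;
    }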
Changed files (1)
src/arch/wasm/CodeGen.zig
@@ -78,8 +78,8 @@ const WValue = union(enum) {
     /// bottom of the stack. For instances where `WValue` is not `stack_offset`
     /// this will return 0, which allows us to simply call this function for all
     /// loads and stores without requiring checks everywhere.
-    fn offset(self: WValue) u32 {
-        switch (self) {
+    fn offset(value: WValue) u32 {
+        switch (value) {
             .stack_offset => |stack_offset| return stack_offset.value,
             else => return 0,
         }
@@ -88,7 +88,7 @@ const WValue = union(enum) {
     /// Promotes a `WValue` to a local when the given value is on top of the stack.
     /// When encountering a `local` or `stack_offset` this is essentially a no-op.
     /// All other tags are illegal.
-    fn toLocal(value: WValue, gen: *Self, ty: Type) InnerError!WValue {
+    fn toLocal(value: WValue, gen: *CodeGen, ty: Type) InnerError!WValue {
         switch (value) {
             .stack => {
                 const new_local = try gen.allocLocal(ty);
@@ -103,7 +103,7 @@ const WValue = union(enum) {
     /// Marks a local as no longer being referenced and essentially allows
     /// us to re-use it somewhere else within the function.
     /// The valtype of the local is deduced from the index of the given `WValue`.
-    fn free(value: *WValue, gen: *Self) void {
+    fn free(value: *WValue, gen: *CodeGen) void {
         if (value.* != .local) return;
         const local_value = value.local.value;
         const reserved = gen.args.len + @boolToInt(gen.return_value != .none);
@@ -584,7 +584,7 @@ pub const Result = union(enum) {
 /// Hashmap to store generated `WValue` for each `Air.Inst.Ref`
 pub const ValueTable = std.AutoArrayHashMapUnmanaged(Air.Inst.Ref, WValue);
 
-const Self = @This();
+const CodeGen = @This();
 
 /// Reference to the function declaration the code
 /// section belongs to
@@ -686,37 +686,34 @@ const InnerError = error{
     Overflow,
 };
 
-pub fn deinit(self: *Self) void {
-    for (self.branches.items) |*branch| {
-        branch.deinit(self.gpa);
-    }
-    self.branches.deinit(self.gpa);
-    // self.values.deinit(self.gpa);
-    self.blocks.deinit(self.gpa);
-    self.locals.deinit(self.gpa);
-    self.mir_instructions.deinit(self.gpa);
-    self.mir_extra.deinit(self.gpa);
-    self.free_locals_i32.deinit(self.gpa);
-    self.free_locals_i64.deinit(self.gpa);
-    self.free_locals_f32.deinit(self.gpa);
-    self.free_locals_f64.deinit(self.gpa);
-    self.* = undefined;
+pub fn deinit(func: *CodeGen) void {
+    assert(func.branches.items.len == 0); // we should end with no branches left. Forgot a call to `branches.pop()`?
+    func.branches.deinit(func.gpa);
+    func.blocks.deinit(func.gpa);
+    func.locals.deinit(func.gpa);
+    func.mir_instructions.deinit(func.gpa);
+    func.mir_extra.deinit(func.gpa);
+    func.free_locals_i32.deinit(func.gpa);
+    func.free_locals_i64.deinit(func.gpa);
+    func.free_locals_f32.deinit(func.gpa);
+    func.free_locals_f64.deinit(func.gpa);
+    func.* = undefined;
 }
 
 /// Sets `err_msg` on `CodeGen` and returns `error.CodegenFail` which is caught in link/Wasm.zig
-fn fail(self: *Self, comptime fmt: []const u8, args: anytype) InnerError {
+fn fail(func: *CodeGen, comptime fmt: []const u8, args: anytype) InnerError {
     const src = LazySrcLoc.nodeOffset(0);
-    const src_loc = src.toSrcLoc(self.decl);
-    self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args);
+    const src_loc = src.toSrcLoc(func.decl);
+    func.err_msg = try Module.ErrorMsg.create(func.gpa, src_loc, fmt, args);
     return error.CodegenFail;
 }
 
 /// Resolves the `WValue` for the given instruction reference `ref`.
 /// When the given instruction has a `Value`, it returns a constant instead.
-fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
-    var branch_index = self.branches.items.len;
+fn resolveInst(func: *CodeGen, ref: Air.Inst.Ref) InnerError!WValue {
+    var branch_index = func.branches.items.len;
     while (branch_index > 0) : (branch_index -= 1) {
-        const branch = self.branches.items[branch_index - 1];
+        const branch = func.branches.items[branch_index - 1];
         if (branch.values.get(ref)) |value| {
             return value;
         }
@@ -726,11 +723,11 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
     // means we must generate it from a constant.
     // We always store constants in the most outer branch as they must never
     // be removed. The most outer branch is always at index 0.
-    const gop = try self.branches.items[0].values.getOrPut(self.gpa, ref);
+    const gop = try func.branches.items[0].values.getOrPut(func.gpa, ref);
     assert(!gop.found_existing);
 
-    const val = self.air.value(ref).?;
-    const ty = self.air.typeOf(ref);
+    const val = func.air.value(ref).?;
+    const ty = func.air.typeOf(ref);
     if (!ty.hasRuntimeBitsIgnoreComptime() and !ty.isInt() and !ty.isError()) {
         gop.value_ptr.* = WValue{ .none = {} };
         return gop.value_ptr.*;
@@ -742,34 +739,34 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
     //
     // In the other cases, we will simply lower the constant to a value that fits
     // into a single local (such as a pointer, integer, bool, etc).
-    const result = if (isByRef(ty, self.target)) blk: {
-        const sym_index = try self.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, self.decl_index);
+    const result = if (isByRef(ty, func.target)) blk: {
+        const sym_index = try func.bin_file.lowerUnnamedConst(.{ .ty = ty, .val = val }, func.decl_index);
         break :blk WValue{ .memory = sym_index };
-    } else try self.lowerConstant(val, ty);
+    } else try func.lowerConstant(val, ty);
 
     gop.value_ptr.* = result;
     return result;
 }
 
-fn finishAir(self: *Self, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) void {
+fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) void {
     assert(operands.len <= Liveness.bpi - 1);
-    var tomb_bits = self.liveness.getTombBits(inst);
+    var tomb_bits = func.liveness.getTombBits(inst);
     for (operands) |operand| {
         const dies = @truncate(u1, tomb_bits) != 0;
         tomb_bits >>= 1;
         if (!dies) continue;
-        processDeath(self, operand);
+        processDeath(func, operand);
     }
 
     // results of `none` can never be referenced.
     if (result != .none) {
         assert(result != .stack); // it's illegal to store a stack value as we cannot track its position
-        const branch = self.currentBranch();
+        const branch = func.currentBranch();
         branch.values.putAssumeCapacityNoClobber(Air.indexToRef(inst), result);
     }
 
     if (builtin.mode == .Debug) {
-        self.air_bookkeeping += 1;
+        func.air_bookkeeping += 1;
     }
 }
 
@@ -781,12 +778,12 @@ const Branch = struct {
     }
 };
 
-inline fn currentBranch(self: *Self) *Branch {
-    return &self.branches.items[self.branches.items.len - 1];
+inline fn currentBranch(func: *CodeGen) *Branch {
+    return &func.branches.items[func.branches.items.len - 1];
 }
 
 const BigTomb = struct {
-    gen: *Self,
+    gen: *CodeGen,
     inst: Air.Inst.Index,
     lbt: Liveness.BigTomb,
 
@@ -809,85 +806,85 @@ const BigTomb = struct {
     }
 };
 
-fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
-    try self.currentBranch().values.ensureUnusedCapacity(self.gpa, operand_count + 1);
+fn iterateBigTomb(func: *CodeGen, inst: Air.Inst.Index, operand_count: usize) !BigTomb {
+    try func.currentBranch().values.ensureUnusedCapacity(func.gpa, operand_count + 1);
     return BigTomb{
-        .gen = self,
+        .gen = func,
         .inst = inst,
-        .lbt = self.liveness.iterateBigTomb(inst),
+        .lbt = func.liveness.iterateBigTomb(inst),
     };
 }
 
-fn processDeath(self: *Self, ref: Air.Inst.Ref) void {
+fn processDeath(func: *CodeGen, ref: Air.Inst.Ref) void {
     const inst = Air.refToIndex(ref) orelse return;
-    if (self.air.instructions.items(.tag)[inst] == .constant) return;
+    if (func.air.instructions.items(.tag)[inst] == .constant) return;
     // Branches are currently only allowed to free locals allocated
     // within their own branch.
     // TODO: Upon branch consolidation free any locals if needed.
-    const value = self.currentBranch().values.getPtr(ref) orelse return;
+    const value = func.currentBranch().values.getPtr(ref) orelse return;
     if (value.* != .local) return;
     log.debug("Decreasing reference for ref: %{?d}\n", .{Air.refToIndex(ref)});
     value.local.references -= 1; // if this panics, a call to `reuseOperand` was forgotten by the developer
     if (value.local.references == 0) {
-        value.free(self);
+        value.free(func);
     }
 }
 
 /// Appends a MIR instruction and returns its index within the list of instructions
-fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!void {
-    try self.mir_instructions.append(self.gpa, inst);
+fn addInst(func: *CodeGen, inst: Mir.Inst) error{OutOfMemory}!void {
+    try func.mir_instructions.append(func.gpa, inst);
 }
 
-fn addTag(self: *Self, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
-    try self.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
+fn addTag(func: *CodeGen, tag: Mir.Inst.Tag) error{OutOfMemory}!void {
+    try func.addInst(.{ .tag = tag, .data = .{ .tag = {} } });
 }
 
-fn addExtended(self: *Self, opcode: wasm.PrefixedOpcode) error{OutOfMemory}!void {
-    try self.addInst(.{ .tag = .extended, .secondary = @enumToInt(opcode), .data = .{ .tag = {} } });
+fn addExtended(func: *CodeGen, opcode: wasm.PrefixedOpcode) error{OutOfMemory}!void {
+    try func.addInst(.{ .tag = .extended, .secondary = @enumToInt(opcode), .data = .{ .tag = {} } });
 }
 
-fn addLabel(self: *Self, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
-    try self.addInst(.{ .tag = tag, .data = .{ .label = label } });
+fn addLabel(func: *CodeGen, tag: Mir.Inst.Tag, label: u32) error{OutOfMemory}!void {
+    try func.addInst(.{ .tag = tag, .data = .{ .label = label } });
 }
 
-fn addImm32(self: *Self, imm: i32) error{OutOfMemory}!void {
-    try self.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = imm } });
+fn addImm32(func: *CodeGen, imm: i32) error{OutOfMemory}!void {
+    try func.addInst(.{ .tag = .i32_const, .data = .{ .imm32 = imm } });
 }
 
 /// Accepts an unsigned 64-bit integer rather than a signed integer to
 /// prevent us from having to bitcast multiple times as most values
 /// within codegen are represented as unsigned rather than signed.
-fn addImm64(self: *Self, imm: u64) error{OutOfMemory}!void {
-    const extra_index = try self.addExtra(Mir.Imm64.fromU64(imm));
-    try self.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
+fn addImm64(func: *CodeGen, imm: u64) error{OutOfMemory}!void {
+    const extra_index = try func.addExtra(Mir.Imm64.fromU64(imm));
+    try func.addInst(.{ .tag = .i64_const, .data = .{ .payload = extra_index } });
 }
 
-fn addFloat64(self: *Self, float: f64) error{OutOfMemory}!void {
-    const extra_index = try self.addExtra(Mir.Float64.fromFloat64(float));
-    try self.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
+fn addFloat64(func: *CodeGen, float: f64) error{OutOfMemory}!void {
+    const extra_index = try func.addExtra(Mir.Float64.fromFloat64(float));
+    try func.addInst(.{ .tag = .f64_const, .data = .{ .payload = extra_index } });
 }
 
 /// Inserts an instruction to load/store from/to wasm's linear memory depending on the given `tag`.
-fn addMemArg(self: *Self, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
-    const extra_index = try self.addExtra(mem_arg);
-    try self.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } });
+fn addMemArg(func: *CodeGen, tag: Mir.Inst.Tag, mem_arg: Mir.MemArg) error{OutOfMemory}!void {
+    const extra_index = try func.addExtra(mem_arg);
+    try func.addInst(.{ .tag = tag, .data = .{ .payload = extra_index } });
 }
 
 /// Appends entries to `mir_extra` based on the type of `extra`.
 /// Returns the index into `mir_extra`
-fn addExtra(self: *Self, extra: anytype) error{OutOfMemory}!u32 {
+fn addExtra(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len);
-    return self.addExtraAssumeCapacity(extra);
+    try func.mir_extra.ensureUnusedCapacity(func.gpa, fields.len);
+    return func.addExtraAssumeCapacity(extra);
 }
 
 /// Appends entries to `mir_extra` based on the type of `extra`.
 /// Returns the index into `mir_extra`
-fn addExtraAssumeCapacity(self: *Self, extra: anytype) error{OutOfMemory}!u32 {
+fn addExtraAssumeCapacity(func: *CodeGen, extra: anytype) error{OutOfMemory}!u32 {
     const fields = std.meta.fields(@TypeOf(extra));
-    const result = @intCast(u32, self.mir_extra.items.len);
+    const result = @intCast(u32, func.mir_extra.items.len);
     inline for (fields) |field| {
-        self.mir_extra.appendAssumeCapacity(switch (field.field_type) {
+        func.mir_extra.appendAssumeCapacity(switch (field.field_type) {
             u32 => @field(extra, field.name),
             else => |field_type| @compileError("Unsupported field type " ++ @typeName(field_type)),
         });
@@ -932,32 +929,32 @@ fn genBlockType(ty: Type, target: std.Target) u8 {
 }
 
 /// Writes the bytecode for the given `WValue` in `value`.
-fn emitWValue(self: *Self, value: WValue) InnerError!void {
+fn emitWValue(func: *CodeGen, value: WValue) InnerError!void {
     switch (value) {
         .none, .stack => {}, // no-op
-        .local => |idx| try self.addLabel(.local_get, idx.value),
-        .imm32 => |val| try self.addImm32(@bitCast(i32, val)),
-        .imm64 => |val| try self.addImm64(val),
-        .float32 => |val| try self.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
-        .float64 => |val| try self.addFloat64(val),
+        .local => |idx| try func.addLabel(.local_get, idx.value),
+        .imm32 => |val| try func.addImm32(@bitCast(i32, val)),
+        .imm64 => |val| try func.addImm64(val),
+        .float32 => |val| try func.addInst(.{ .tag = .f32_const, .data = .{ .float32 = val } }),
+        .float64 => |val| try func.addFloat64(val),
         .memory => |ptr| {
-            const extra_index = try self.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
-            try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+            const extra_index = try func.addExtra(Mir.Memory{ .pointer = ptr, .offset = 0 });
+            try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
         },
         .memory_offset => |mem_off| {
-            const extra_index = try self.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
-            try self.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
+            const extra_index = try func.addExtra(Mir.Memory{ .pointer = mem_off.pointer, .offset = mem_off.offset });
+            try func.addInst(.{ .tag = .memory_address, .data = .{ .payload = extra_index } });
         },
-        .function_index => |index| try self.addLabel(.function_index, index), // write function index and generate relocation
-        .stack_offset => try self.addLabel(.local_get, self.bottom_stack_value.local.value), // caller must ensure to address the offset
+        .function_index => |index| try func.addLabel(.function_index, index), // write function index and generate relocation
+        .stack_offset => try func.addLabel(.local_get, func.bottom_stack_value.local.value), // caller must ensure to address the offset
     }
 }
 
 /// If given a local or stack-offset, increases the reference count by 1.
 /// The old `WValue` found at instruction `ref` is then replaced by the
 /// modified `WValue` and returned. When given a non-local or non-stack-offset,
/// returns the given `operand` itself instead.
-fn reuseOperand(self: *Self, ref: Air.Inst.Ref, operand: WValue) WValue {
+fn reuseOperand(func: *CodeGen, ref: Air.Inst.Ref, operand: WValue) WValue {
     if (operand != .local and operand != .stack_offset) return operand;
     var new_value = operand;
     switch (new_value) {
@@ -965,17 +962,17 @@ fn reuseOperand(self: *Self, ref: Air.Inst.Ref, operand: WValue) WValue {
         .stack_offset => |*stack_offset| stack_offset.references += 1,
         else => unreachable,
     }
-    const old_value = self.getResolvedInst(ref);
+    const old_value = func.getResolvedInst(ref);
     old_value.* = new_value;
     return new_value;
 }
 
 /// From a reference, returns its resolved `WValue`.
 /// It's illegal to provide a `Air.Inst.Ref` that hasn't been resolved yet.
-fn getResolvedInst(self: *Self, ref: Air.Inst.Ref) *WValue {
-    var index = self.branches.items.len;
+fn getResolvedInst(func: *CodeGen, ref: Air.Inst.Ref) *WValue {
+    var index = func.branches.items.len;
     while (index > 0) : (index -= 1) {
-        const branch = self.branches.items[index - 1];
+        const branch = func.branches.items[index - 1];
         if (branch.values.getPtr(ref)) |value| {
             return value;
         }
@@ -985,37 +982,37 @@ fn getResolvedInst(self: *Self, ref: Air.Inst.Ref) *WValue {
 
 /// Creates a local for a given `Type`.
 /// Returns a corresponding `WValue` with `local` as the active tag.
-fn allocLocal(self: *Self, ty: Type) InnerError!WValue {
-    const valtype = typeToValtype(ty, self.target);
+fn allocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
+    const valtype = typeToValtype(ty, func.target);
     switch (valtype) {
-        .i32 => if (self.free_locals_i32.popOrNull()) |index| {
+        .i32 => if (func.free_locals_i32.popOrNull()) |index| {
             log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
             return WValue{ .local = .{ .value = index, .references = 1 } };
         },
-        .i64 => if (self.free_locals_i64.popOrNull()) |index| {
+        .i64 => if (func.free_locals_i64.popOrNull()) |index| {
             log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
             return WValue{ .local = .{ .value = index, .references = 1 } };
         },
-        .f32 => if (self.free_locals_f32.popOrNull()) |index| {
+        .f32 => if (func.free_locals_f32.popOrNull()) |index| {
             log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
             return WValue{ .local = .{ .value = index, .references = 1 } };
         },
-        .f64 => if (self.free_locals_f64.popOrNull()) |index| {
+        .f64 => if (func.free_locals_f64.popOrNull()) |index| {
             log.debug("reusing local ({d}) of type {}\n", .{ index, valtype });
             return WValue{ .local = .{ .value = index, .references = 1 } };
         },
     }
     log.debug("new local of type {}\n", .{valtype});
     // no local was free to be re-used, so allocate a new local instead
-    return self.ensureAllocLocal(ty);
+    return func.ensureAllocLocal(ty);
 }
 
 /// Ensures a new local will be created. This is useful when a
 /// zero-initialized local is needed.
-fn ensureAllocLocal(self: *Self, ty: Type) InnerError!WValue {
-    try self.locals.append(self.gpa, genValtype(ty, self.target));
-    const initial_index = self.local_index;
-    self.local_index += 1;
+fn ensureAllocLocal(func: *CodeGen, ty: Type) InnerError!WValue {
+    try func.locals.append(func.gpa, genValtype(ty, func.target));
+    const initial_index = func.local_index;
+    func.local_index += 1;
     return WValue{ .local = .{ .value = initial_index, .references = 1 } };
 }
 
@@ -1082,7 +1079,7 @@ pub fn generate(
     debug_output: codegen.DebugInfoOutput,
 ) codegen.GenerateSymbolError!codegen.FnResult {
     _ = src_loc;
-    var code_gen: Self = .{
+    var code_gen: CodeGen = .{
         .gpa = bin_file.allocator,
         .air = air,
         .liveness = liveness,
@@ -1107,88 +1104,89 @@ pub fn generate(
     return codegen.FnResult{ .appended = {} };
 }
 
-fn genFunc(self: *Self) InnerError!void {
-    const fn_info = self.decl.ty.fnInfo();
-    var func_type = try genFunctype(self.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, self.target);
-    defer func_type.deinit(self.gpa);
-    self.decl.fn_link.wasm.type_index = try self.bin_file.putOrGetFuncType(func_type);
+fn genFunc(func: *CodeGen) InnerError!void {
+    const fn_info = func.decl.ty.fnInfo();
+    var func_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+    defer func_type.deinit(func.gpa);
+    func.decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
 
-    var cc_result = try self.resolveCallingConventionValues(self.decl.ty);
-    defer cc_result.deinit(self.gpa);
+    var cc_result = try func.resolveCallingConventionValues(func.decl.ty);
+    defer cc_result.deinit(func.gpa);
 
-    self.args = cc_result.args;
-    self.return_value = cc_result.return_value;
+    func.args = cc_result.args;
+    func.return_value = cc_result.return_value;
 
-    try self.addTag(.dbg_prologue_end);
+    try func.addTag(.dbg_prologue_end);
 
-    try self.branches.append(self.gpa, .{});
+    try func.branches.append(func.gpa, .{});
     // Generate MIR for function body
-    try self.genBody(self.air.getMainBody());
+    try func.genBody(func.air.getMainBody());
 
     // clean up outer branch
-    _ = self.branches.pop();
+    var outer_branch = func.branches.pop();
+    outer_branch.deinit(func.gpa);
 
     // In case we have a return value, but the last instruction is a noreturn (such as a while loop)
     // we emit an unreachable instruction to tell the stack validator that part will never be reached.
-    if (func_type.returns.len != 0 and self.air.instructions.len > 0) {
-        const inst = @intCast(u32, self.air.instructions.len - 1);
-        const last_inst_ty = self.air.typeOfIndex(inst);
+    if (func_type.returns.len != 0 and func.air.instructions.len > 0) {
+        const inst = @intCast(u32, func.air.instructions.len - 1);
+        const last_inst_ty = func.air.typeOfIndex(inst);
         if (!last_inst_ty.hasRuntimeBitsIgnoreComptime() or last_inst_ty.isNoReturn()) {
-            try self.addTag(.@"unreachable");
+            try func.addTag(.@"unreachable");
         }
     }
     // End of function body
-    try self.addTag(.end);
+    try func.addTag(.end);
 
-    try self.addTag(.dbg_epilogue_begin);
+    try func.addTag(.dbg_epilogue_begin);
 
     // check if we have to initialize and allocate anything into the stack frame.
     // If so, create enough stack space and insert the instructions at the front of the list.
-    if (self.stack_size > 0) {
-        var prologue = std.ArrayList(Mir.Inst).init(self.gpa);
+    if (func.stack_size > 0) {
+        var prologue = std.ArrayList(Mir.Inst).init(func.gpa);
         defer prologue.deinit();
 
         // load stack pointer
         try prologue.append(.{ .tag = .global_get, .data = .{ .label = 0 } });
         // store stack pointer so we can restore it when we return from the function
-        try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.initial_stack_value.local.value } });
+        try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.initial_stack_value.local.value } });
         // get the total stack size
-        const aligned_stack = std.mem.alignForwardGeneric(u32, self.stack_size, self.stack_alignment);
+        const aligned_stack = std.mem.alignForwardGeneric(u32, func.stack_size, func.stack_alignment);
         try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, aligned_stack) } });
         // subtract it from the current stack pointer
         try prologue.append(.{ .tag = .i32_sub, .data = .{ .tag = {} } });
         // Get the negative stack alignment
-        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, self.stack_alignment) * -1 } });
+        try prologue.append(.{ .tag = .i32_const, .data = .{ .imm32 = @intCast(i32, func.stack_alignment) * -1 } });
         // Bitwise-and the value to get the new stack pointer to ensure the pointers are aligned with the abi alignment
         try prologue.append(.{ .tag = .i32_and, .data = .{ .tag = {} } });
         // store the current stack pointer as the bottom, which will be used to calculate all stack pointer offsets
-        try prologue.append(.{ .tag = .local_tee, .data = .{ .label = self.bottom_stack_value.local.value } });
+        try prologue.append(.{ .tag = .local_tee, .data = .{ .label = func.bottom_stack_value.local.value } });
         // Store the current stack pointer value into the global stack pointer so other function calls will
         // start from this value instead and not overwrite the current stack.
         try prologue.append(.{ .tag = .global_set, .data = .{ .label = 0 } });
 
         // reserve space and insert all prologue instructions at the front of the instruction list
     // We insert them in reverse order as there is no insertSlice in MultiArrayList.
-        try self.mir_instructions.ensureUnusedCapacity(self.gpa, prologue.items.len);
+        try func.mir_instructions.ensureUnusedCapacity(func.gpa, prologue.items.len);
         for (prologue.items) |_, index| {
             const inst = prologue.items[prologue.items.len - 1 - index];
-            self.mir_instructions.insertAssumeCapacity(0, inst);
+            func.mir_instructions.insertAssumeCapacity(0, inst);
         }
     }
 
     var mir: Mir = .{
-        .instructions = self.mir_instructions.toOwnedSlice(),
-        .extra = self.mir_extra.toOwnedSlice(self.gpa),
+        .instructions = func.mir_instructions.toOwnedSlice(),
+        .extra = func.mir_extra.toOwnedSlice(func.gpa),
     };
-    defer mir.deinit(self.gpa);
+    defer mir.deinit(func.gpa);
 
     var emit: Emit = .{
         .mir = mir,
-        .bin_file = &self.bin_file.base,
-        .code = self.code,
-        .locals = self.locals.items,
-        .decl = self.decl,
-        .dbg_output = self.debug_output,
+        .bin_file = &func.bin_file.base,
+        .code = func.code,
+        .locals = func.locals.items,
+        .decl = func.decl,
+        .dbg_output = func.debug_output,
         .prev_di_line = 0,
         .prev_di_column = 0,
         .prev_di_offset = 0,
@@ -1196,7 +1194,7 @@ fn genFunc(self: *Self) InnerError!void {
 
     emit.emitMir() catch |err| switch (err) {
         error.EmitFail => {
-            self.err_msg = emit.error_msg.?;
+            func.err_msg = emit.error_msg.?;
             return error.CodegenFail;
         },
         else => |e| return e,
@@ -1207,16 +1205,16 @@ const CallWValues = struct {
     args: []WValue,
     return_value: WValue,
 
-    fn deinit(self: *CallWValues, gpa: Allocator) void {
-        gpa.free(self.args);
-        self.* = undefined;
+    fn deinit(values: *CallWValues, gpa: Allocator) void {
+        gpa.free(values.args);
+        values.* = undefined;
     }
 };
 
-fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValues {
+fn resolveCallingConventionValues(func: *CodeGen, fn_ty: Type) InnerError!CallWValues {
     const cc = fn_ty.fnCallingConvention();
-    const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
-    defer self.gpa.free(param_types);
+    const param_types = try func.gpa.alloc(Type, fn_ty.fnParamLen());
+    defer func.gpa.free(param_types);
     fn_ty.fnParamTypes(param_types);
     var result: CallWValues = .{
         .args = &.{},
@@ -1224,17 +1222,17 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
     };
     if (cc == .Naked) return result;
 
-    var args = std.ArrayList(WValue).init(self.gpa);
+    var args = std.ArrayList(WValue).init(func.gpa);
     defer args.deinit();
 
     // Check if we store the result as a pointer to the stack rather than
     // by value
     const fn_info = fn_ty.fnInfo();
-    if (firstParamSRet(fn_info.cc, fn_info.return_type, self.target)) {
+    if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
         // the sret arg will be passed as first argument, therefore we
         // set the `return_value` before allocating locals for regular args.
-        result.return_value = .{ .local = .{ .value = self.local_index, .references = 1 } };
-        self.local_index += 1;
+        result.return_value = .{ .local = .{ .value = func.local_index, .references = 1 } };
+        func.local_index += 1;
     }
 
     switch (cc) {
@@ -1244,21 +1242,21 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
                     continue;
                 }
 
-                try args.append(.{ .local = .{ .value = self.local_index, .references = 1 } });
-                self.local_index += 1;
+                try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
+                func.local_index += 1;
             }
         },
         .C => {
             for (param_types) |ty| {
-                const ty_classes = abi.classifyType(ty, self.target);
+                const ty_classes = abi.classifyType(ty, func.target);
                 for (ty_classes) |class| {
                     if (class == .none) continue;
-                    try args.append(.{ .local = .{ .value = self.local_index, .references = 1 } });
-                    self.local_index += 1;
+                    try args.append(.{ .local = .{ .value = func.local_index, .references = 1 } });
+                    func.local_index += 1;
                 }
             }
         },
-        else => return self.fail("calling convention '{s}' not supported for Wasm", .{@tagName(cc)}),
+        else => return func.fail("calling convention '{s}' not supported for Wasm", .{@tagName(cc)}),
     }
     result.args = args.toOwnedSlice();
     return result;
@@ -1279,14 +1277,14 @@ fn firstParamSRet(cc: std.builtin.CallingConvention, return_type: Type, target:
 
 /// For a given `Type`, add debug information to .debug_info at the current position.
 /// The actual bytes will be written to the position after relocation.
-fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
-    switch (self.debug_output) {
+fn addDbgInfoTypeReloc(func: *CodeGen, ty: Type) !void {
+    switch (func.debug_output) {
         .dwarf => |dwarf| {
             assert(ty.hasRuntimeBitsIgnoreComptime());
             const dbg_info = &dwarf.dbg_info;
             const index = dbg_info.items.len;
             try dbg_info.resize(index + 4);
-            const atom = &self.decl.link.wasm.dbg_info_atom;
+            const atom = &func.decl.link.wasm.dbg_info_atom;
             try dwarf.addTypeRelocGlobal(atom, ty, @intCast(u32, index));
         },
         .plan9 => unreachable,
@@ -1296,96 +1294,96 @@ fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
 
 /// Lowers a Zig type and its value based on a given calling convention to ensure
 /// it matches the ABI.
-fn lowerArg(self: *Self, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
+fn lowerArg(func: *CodeGen, cc: std.builtin.CallingConvention, ty: Type, value: WValue) !void {
     if (cc != .C) {
-        return self.lowerToStack(value);
+        return func.lowerToStack(value);
     }
 
-    const ty_classes = abi.classifyType(ty, self.target);
+    const ty_classes = abi.classifyType(ty, func.target);
     assert(ty_classes[0] != .none);
     switch (ty.zigTypeTag()) {
         .Struct, .Union => {
             if (ty_classes[0] == .indirect) {
-                return self.lowerToStack(value);
+                return func.lowerToStack(value);
             }
             assert(ty_classes[0] == .direct);
-            const scalar_type = abi.scalarType(ty, self.target);
-            const abi_size = scalar_type.abiSize(self.target);
+            const scalar_type = abi.scalarType(ty, func.target);
+            const abi_size = scalar_type.abiSize(func.target);
             const opcode = buildOpcode(.{
                 .op = .load,
                 .width = @intCast(u8, abi_size),
                 .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
-                .valtype1 = typeToValtype(scalar_type, self.target),
+                .valtype1 = typeToValtype(scalar_type, func.target),
             });
-            try self.emitWValue(value);
-            try self.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
+            try func.emitWValue(value);
+            try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                 .offset = value.offset(),
-                .alignment = scalar_type.abiAlignment(self.target),
+                .alignment = scalar_type.abiAlignment(func.target),
             });
         },
         .Int, .Float => {
             if (ty_classes[1] == .none) {
-                return self.lowerToStack(value);
+                return func.lowerToStack(value);
             }
             assert(ty_classes[0] == .direct and ty_classes[1] == .direct);
-            assert(ty.abiSize(self.target) == 16);
+            assert(ty.abiSize(func.target) == 16);
             // in this case we have an integer or float that must be lowered as 2 i64's.
-            try self.emitWValue(value);
-            try self.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
-            try self.emitWValue(value);
-            try self.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 });
+            try func.emitWValue(value);
+            try func.addMemArg(.i64_load, .{ .offset = value.offset(), .alignment = 8 });
+            try func.emitWValue(value);
+            try func.addMemArg(.i64_load, .{ .offset = value.offset() + 8, .alignment = 8 });
         },
-        else => return self.lowerToStack(value),
+        else => return func.lowerToStack(value),
     }
 }
 
 /// Lowers a `WValue` to the stack. When `value` is a `.stack_offset`,
 /// we calculate the pointer from this offset and use that.
 /// The value is left on the stack, and not stored in any temporary.
-fn lowerToStack(self: *Self, value: WValue) !void {
+fn lowerToStack(func: *CodeGen, value: WValue) !void {
     switch (value) {
         .stack_offset => |offset| {
-            try self.emitWValue(value);
+            try func.emitWValue(value);
             if (offset.value > 0) {
-                switch (self.arch()) {
+                switch (func.arch()) {
                     .wasm32 => {
-                        try self.addImm32(@bitCast(i32, offset.value));
-                        try self.addTag(.i32_add);
+                        try func.addImm32(@bitCast(i32, offset.value));
+                        try func.addTag(.i32_add);
                     },
                     .wasm64 => {
-                        try self.addImm64(offset.value);
-                        try self.addTag(.i64_add);
+                        try func.addImm64(offset.value);
+                        try func.addTag(.i64_add);
                     },
                     else => unreachable,
                 }
             }
         },
-        else => try self.emitWValue(value),
+        else => try func.emitWValue(value),
     }
 }
 
 /// Creates a local for the initial stack value
 /// Asserts `initial_stack_value` is `.none`
-fn initializeStack(self: *Self) !void {
-    assert(self.initial_stack_value == .none);
+fn initializeStack(func: *CodeGen) !void {
+    assert(func.initial_stack_value == .none);
     // Reserve a local to store the current stack pointer
     // We can later use this local to set the stack pointer back to the value
     // we have stored here.
-    self.initial_stack_value = try self.ensureAllocLocal(Type.usize);
+    func.initial_stack_value = try func.ensureAllocLocal(Type.usize);
     // Also reserve a local to store the bottom stack value
-    self.bottom_stack_value = try self.ensureAllocLocal(Type.usize);
+    func.bottom_stack_value = try func.ensureAllocLocal(Type.usize);
 }
 
 /// Reads the stack pointer from `initial_stack_value` and writes it
 /// to the global stack pointer variable
-fn restoreStackPointer(self: *Self) !void {
+fn restoreStackPointer(func: *CodeGen) !void {
     // only restore the pointer if it was initialized
-    if (self.initial_stack_value == .none) return;
+    if (func.initial_stack_value == .none) return;
     // Get the original stack pointer's value
-    try self.emitWValue(self.initial_stack_value);
+    try func.emitWValue(func.initial_stack_value);
 
     // save its value in the global stack pointer
-    try self.addLabel(.global_set, 0);
+    try func.addLabel(.global_set, 0);
 }
 
 /// For a given type, creates space on the virtual stack to store a value of that type.
@@ -1394,26 +1392,26 @@ fn restoreStackPointer(self: *Self) !void {
 /// moveStack unless a local was already created to store the pointer.
 ///
 /// Asserts the type has runtime bits.
-fn allocStack(self: *Self, ty: Type) !WValue {
+fn allocStack(func: *CodeGen, ty: Type) !WValue {
     assert(ty.hasRuntimeBitsIgnoreComptime());
-    if (self.initial_stack_value == .none) {
-        try self.initializeStack();
+    if (func.initial_stack_value == .none) {
+        try func.initializeStack();
     }
 
-    const abi_size = std.math.cast(u32, ty.abiSize(self.target)) orelse {
-        const module = self.bin_file.base.options.module.?;
-        return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
-            ty.fmt(module), ty.abiSize(self.target),
+    const abi_size = std.math.cast(u32, ty.abiSize(func.target)) orelse {
+        const module = func.bin_file.base.options.module.?;
+        return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+            ty.fmt(module), ty.abiSize(func.target),
         });
     };
-    const abi_align = ty.abiAlignment(self.target);
+    const abi_align = ty.abiAlignment(func.target);
 
-    if (abi_align > self.stack_alignment) {
-        self.stack_alignment = abi_align;
+    if (abi_align > func.stack_alignment) {
+        func.stack_alignment = abi_align;
     }
 
-    const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_align);
-    defer self.stack_size = offset + abi_size;
+    const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_align);
+    defer func.stack_size = offset + abi_size;
 
     return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
 }
@@ -1422,31 +1420,31 @@ fn allocStack(self: *Self, ty: Type) !WValue {
 /// the value of its type will live.
 /// This is different from allocStack where this will use the pointer's alignment
 /// if it is set, to ensure the stack alignment will be set correctly.
-fn allocStackPtr(self: *Self, inst: Air.Inst.Index) !WValue {
-    const ptr_ty = self.air.typeOfIndex(inst);
+fn allocStackPtr(func: *CodeGen, inst: Air.Inst.Index) !WValue {
+    const ptr_ty = func.air.typeOfIndex(inst);
     const pointee_ty = ptr_ty.childType();
 
-    if (self.initial_stack_value == .none) {
-        try self.initializeStack();
+    if (func.initial_stack_value == .none) {
+        try func.initializeStack();
     }
 
     if (!pointee_ty.hasRuntimeBitsIgnoreComptime()) {
-        return self.allocStack(Type.usize); // create a value containing just the stack pointer.
+        return func.allocStack(Type.usize); // create a value containing just the stack pointer.
     }
 
-    const abi_alignment = ptr_ty.ptrAlignment(self.target);
-    const abi_size = std.math.cast(u32, pointee_ty.abiSize(self.target)) orelse {
-        const module = self.bin_file.base.options.module.?;
-        return self.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
-            pointee_ty.fmt(module), pointee_ty.abiSize(self.target),
+    const abi_alignment = ptr_ty.ptrAlignment(func.target);
+    const abi_size = std.math.cast(u32, pointee_ty.abiSize(func.target)) orelse {
+        const module = func.bin_file.base.options.module.?;
+        return func.fail("Type {} with ABI size of {d} exceeds stack frame size", .{
+            pointee_ty.fmt(module), pointee_ty.abiSize(func.target),
         });
     };
-    if (abi_alignment > self.stack_alignment) {
-        self.stack_alignment = abi_alignment;
+    if (abi_alignment > func.stack_alignment) {
+        func.stack_alignment = abi_alignment;
     }
 
-    const offset = std.mem.alignForwardGeneric(u32, self.stack_size, abi_alignment);
-    defer self.stack_size = offset + abi_size;
+    const offset = std.mem.alignForwardGeneric(u32, func.stack_size, abi_alignment);
+    defer func.stack_size = offset + abi_size;
 
     return WValue{ .stack_offset = .{ .value = offset, .references = 1 } };
 }
@@ -1460,14 +1458,14 @@ fn toWasmBits(bits: u16) ?u16 {
 
 /// Performs a byte-wise copy for a given type, copying all bytes
 /// from `src` to `dst`.
-fn memcpy(self: *Self, dst: WValue, src: WValue, len: WValue) !void {
+fn memcpy(func: *CodeGen, dst: WValue, src: WValue, len: WValue) !void {
     // When bulk_memory is enabled, we lower it to wasm's memcpy instruction.
     // If not, we lower it ourselves manually
-    if (std.Target.wasm.featureSetHas(self.target.cpu.features, .bulk_memory)) {
-        try self.lowerToStack(dst);
-        try self.lowerToStack(src);
-        try self.emitWValue(len);
-        try self.addExtended(.memory_copy);
+    if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) {
+        try func.lowerToStack(dst);
+        try func.lowerToStack(src);
+        try func.emitWValue(len);
+        try func.addExtended(.memory_copy);
         return;
     }
 
@@ -1485,17 +1483,17 @@ fn memcpy(self: *Self, dst: WValue, src: WValue, len: WValue) !void {
             const rhs_base = src.offset();
             while (offset < length) : (offset += 1) {
                 // get dst's address to store the result
-                try self.emitWValue(dst);
+                try func.emitWValue(dst);
                 // load byte from src's address
-                try self.emitWValue(src);
-                switch (self.arch()) {
+                try func.emitWValue(src);
+                switch (func.arch()) {
                     .wasm32 => {
-                        try self.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
-                        try self.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
+                        try func.addMemArg(.i32_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
+                        try func.addMemArg(.i32_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
                     },
                     .wasm64 => {
-                        try self.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
-                        try self.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
+                        try func.addMemArg(.i64_load8_u, .{ .offset = rhs_base + offset, .alignment = 1 });
+                        try func.addMemArg(.i64_store8, .{ .offset = lhs_base + offset, .alignment = 1 });
                     },
                     else => unreachable,
                 }
@@ -1504,50 +1502,50 @@ fn memcpy(self: *Self, dst: WValue, src: WValue, len: WValue) !void {
         else => {
             // TODO: We should probably lower this to a call to compiler_rt
             // But for now, we implement it manually
-            var offset = try self.ensureAllocLocal(Type.usize); // local for counter
-            defer offset.free(self);
+            var offset = try func.ensureAllocLocal(Type.usize); // local for counter
+            defer offset.free(func);
 
             // outer block to jump to when loop is done
-            try self.startBlock(.block, wasm.block_empty);
-            try self.startBlock(.loop, wasm.block_empty);
+            try func.startBlock(.block, wasm.block_empty);
+            try func.startBlock(.loop, wasm.block_empty);
 
             // loop condition (offset == length -> break)
             {
-                try self.emitWValue(offset);
-                try self.emitWValue(len);
-                switch (self.arch()) {
-                    .wasm32 => try self.addTag(.i32_eq),
-                    .wasm64 => try self.addTag(.i64_eq),
+                try func.emitWValue(offset);
+                try func.emitWValue(len);
+                switch (func.arch()) {
+                    .wasm32 => try func.addTag(.i32_eq),
+                    .wasm64 => try func.addTag(.i64_eq),
                     else => unreachable,
                 }
-                try self.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+                try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
             }
 
             // get dst ptr
             {
-                try self.emitWValue(dst);
-                try self.emitWValue(offset);
-                switch (self.arch()) {
-                    .wasm32 => try self.addTag(.i32_add),
-                    .wasm64 => try self.addTag(.i64_add),
+                try func.emitWValue(dst);
+                try func.emitWValue(offset);
+                switch (func.arch()) {
+                    .wasm32 => try func.addTag(.i32_add),
+                    .wasm64 => try func.addTag(.i64_add),
                     else => unreachable,
                 }
             }
 
             // get src value and also store in dst
             {
-                try self.emitWValue(src);
-                try self.emitWValue(offset);
-                switch (self.arch()) {
+                try func.emitWValue(src);
+                try func.emitWValue(offset);
+                switch (func.arch()) {
                     .wasm32 => {
-                        try self.addTag(.i32_add);
-                        try self.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
-                        try self.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
+                        try func.addTag(.i32_add);
+                        try func.addMemArg(.i32_load8_u, .{ .offset = src.offset(), .alignment = 1 });
+                        try func.addMemArg(.i32_store8, .{ .offset = dst.offset(), .alignment = 1 });
                     },
                     .wasm64 => {
-                        try self.addTag(.i64_add);
-                        try self.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
-                        try self.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
+                        try func.addTag(.i64_add);
+                        try func.addMemArg(.i64_load8_u, .{ .offset = src.offset(), .alignment = 1 });
+                        try func.addMemArg(.i64_store8, .{ .offset = dst.offset(), .alignment = 1 });
                     },
                     else => unreachable,
                 }
@@ -1555,33 +1553,33 @@ fn memcpy(self: *Self, dst: WValue, src: WValue, len: WValue) !void {
 
             // increment loop counter
             {
-                try self.emitWValue(offset);
-                switch (self.arch()) {
+                try func.emitWValue(offset);
+                switch (func.arch()) {
                     .wasm32 => {
-                        try self.addImm32(1);
-                        try self.addTag(.i32_add);
+                        try func.addImm32(1);
+                        try func.addTag(.i32_add);
                     },
                     .wasm64 => {
-                        try self.addImm64(1);
-                        try self.addTag(.i64_add);
+                        try func.addImm64(1);
+                        try func.addTag(.i64_add);
                     },
                     else => unreachable,
                 }
-                try self.addLabel(.local_set, offset.local.value);
-                try self.addLabel(.br, 0); // jump to start of loop
+                try func.addLabel(.local_set, offset.local.value);
+                try func.addLabel(.br, 0); // jump to start of loop
             }
-            try self.endBlock(); // close off loop block
-            try self.endBlock(); // close off outer block
+            try func.endBlock(); // close off loop block
+            try func.endBlock(); // close off outer block
         },
     }
 }
 
-fn ptrSize(self: *const Self) u16 {
-    return @divExact(self.target.cpu.arch.ptrBitWidth(), 8);
+fn ptrSize(func: *const CodeGen) u16 {
+    return @divExact(func.target.cpu.arch.ptrBitWidth(), 8);
 }
 
-fn arch(self: *const Self) std.Target.Cpu.Arch {
-    return self.target.cpu.arch;
+fn arch(func: *const CodeGen) std.Target.Cpu.Arch {
+    return func.target.cpu.arch;
 }
 
 /// For a given `Type`, returns true when the type will be passed
@@ -1639,191 +1637,191 @@ fn isByRef(ty: Type, target: std.Target) bool {
 /// This can be used to get a pointer to a struct field, error payload, etc.
 /// By providing `modify` as action, it will modify the given `ptr_value` instead of making a new
 /// local value to store the pointer. This allows for local re-use and improves binary size.
-fn buildPointerOffset(self: *Self, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
+fn buildPointerOffset(func: *CodeGen, ptr_value: WValue, offset: u64, action: enum { modify, new }) InnerError!WValue {
     // do not perform arithmetic when offset is 0.
     if (offset == 0 and ptr_value.offset() == 0 and action == .modify) return ptr_value;
     const result_ptr: WValue = switch (action) {
-        .new => try self.ensureAllocLocal(Type.usize),
+        .new => try func.ensureAllocLocal(Type.usize),
         .modify => ptr_value,
     };
-    try self.emitWValue(ptr_value);
+    try func.emitWValue(ptr_value);
     if (offset + ptr_value.offset() > 0) {
-        switch (self.arch()) {
+        switch (func.arch()) {
             .wasm32 => {
-                try self.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset())));
-                try self.addTag(.i32_add);
+                try func.addImm32(@bitCast(i32, @intCast(u32, offset + ptr_value.offset())));
+                try func.addTag(.i32_add);
             },
             .wasm64 => {
-                try self.addImm64(offset + ptr_value.offset());
-                try self.addTag(.i64_add);
+                try func.addImm64(offset + ptr_value.offset());
+                try func.addTag(.i64_add);
             },
             else => unreachable,
         }
     }
-    try self.addLabel(.local_set, result_ptr.local.value);
+    try func.addLabel(.local_set, result_ptr.local.value);
     return result_ptr;
 }
 
-fn genInst(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const air_tags = self.air.instructions.items(.tag);
+fn genInst(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const air_tags = func.air.instructions.items(.tag);
     return switch (air_tags[inst]) {
         .constant => unreachable,
         .const_ty => unreachable,
 
-        .add => self.airBinOp(inst, .add),
-        .add_sat => self.airSatBinOp(inst, .add),
-        .addwrap => self.airWrapBinOp(inst, .add),
-        .sub => self.airBinOp(inst, .sub),
-        .sub_sat => self.airSatBinOp(inst, .sub),
-        .subwrap => self.airWrapBinOp(inst, .sub),
-        .mul => self.airBinOp(inst, .mul),
-        .mulwrap => self.airWrapBinOp(inst, .mul),
+        .add => func.airBinOp(inst, .add),
+        .add_sat => func.airSatBinOp(inst, .add),
+        .addwrap => func.airWrapBinOp(inst, .add),
+        .sub => func.airBinOp(inst, .sub),
+        .sub_sat => func.airSatBinOp(inst, .sub),
+        .subwrap => func.airWrapBinOp(inst, .sub),
+        .mul => func.airBinOp(inst, .mul),
+        .mulwrap => func.airWrapBinOp(inst, .mul),
         .div_float,
         .div_exact,
         .div_trunc,
-        => self.airDiv(inst),
-        .div_floor => self.airDivFloor(inst),
-        .ceil => self.airCeilFloorTrunc(inst, .ceil),
-        .floor => self.airCeilFloorTrunc(inst, .floor),
-        .trunc_float => self.airCeilFloorTrunc(inst, .trunc),
-        .bit_and => self.airBinOp(inst, .@"and"),
-        .bit_or => self.airBinOp(inst, .@"or"),
-        .bool_and => self.airBinOp(inst, .@"and"),
-        .bool_or => self.airBinOp(inst, .@"or"),
-        .rem => self.airBinOp(inst, .rem),
-        .shl => self.airWrapBinOp(inst, .shl),
-        .shl_exact => self.airBinOp(inst, .shl),
-        .shl_sat => self.airShlSat(inst),
-        .shr, .shr_exact => self.airBinOp(inst, .shr),
-        .xor => self.airBinOp(inst, .xor),
-        .max => self.airMaxMin(inst, .max),
-        .min => self.airMaxMin(inst, .min),
-        .mul_add => self.airMulAdd(inst),
-
-        .add_with_overflow => self.airAddSubWithOverflow(inst, .add),
-        .sub_with_overflow => self.airAddSubWithOverflow(inst, .sub),
-        .shl_with_overflow => self.airShlWithOverflow(inst),
-        .mul_with_overflow => self.airMulWithOverflow(inst),
-
-        .clz => self.airClz(inst),
-        .ctz => self.airCtz(inst),
-
-        .cmp_eq => self.airCmp(inst, .eq),
-        .cmp_gte => self.airCmp(inst, .gte),
-        .cmp_gt => self.airCmp(inst, .gt),
-        .cmp_lte => self.airCmp(inst, .lte),
-        .cmp_lt => self.airCmp(inst, .lt),
-        .cmp_neq => self.airCmp(inst, .neq),
-
-        .cmp_vector => self.airCmpVector(inst),
-        .cmp_lt_errors_len => self.airCmpLtErrorsLen(inst),
-
-        .array_elem_val => self.airArrayElemVal(inst),
-        .array_to_slice => self.airArrayToSlice(inst),
-        .alloc => self.airAlloc(inst),
-        .arg => self.airArg(inst),
-        .bitcast => self.airBitcast(inst),
-        .block => self.airBlock(inst),
-        .breakpoint => self.airBreakpoint(inst),
-        .br => self.airBr(inst),
-        .bool_to_int => self.airBoolToInt(inst),
-        .cond_br => self.airCondBr(inst),
-        .intcast => self.airIntcast(inst),
-        .fptrunc => self.airFptrunc(inst),
-        .fpext => self.airFpext(inst),
-        .float_to_int => self.airFloatToInt(inst),
-        .int_to_float => self.airIntToFloat(inst),
-        .get_union_tag => self.airGetUnionTag(inst),
-
-        .@"try" => self.airTry(inst),
-        .try_ptr => self.airTryPtr(inst),
+        => func.airDiv(inst),
+        .div_floor => func.airDivFloor(inst),
+        .ceil => func.airCeilFloorTrunc(inst, .ceil),
+        .floor => func.airCeilFloorTrunc(inst, .floor),
+        .trunc_float => func.airCeilFloorTrunc(inst, .trunc),
+        .bit_and => func.airBinOp(inst, .@"and"),
+        .bit_or => func.airBinOp(inst, .@"or"),
+        .bool_and => func.airBinOp(inst, .@"and"),
+        .bool_or => func.airBinOp(inst, .@"or"),
+        .rem => func.airBinOp(inst, .rem),
+        .shl => func.airWrapBinOp(inst, .shl),
+        .shl_exact => func.airBinOp(inst, .shl),
+        .shl_sat => func.airShlSat(inst),
+        .shr, .shr_exact => func.airBinOp(inst, .shr),
+        .xor => func.airBinOp(inst, .xor),
+        .max => func.airMaxMin(inst, .max),
+        .min => func.airMaxMin(inst, .min),
+        .mul_add => func.airMulAdd(inst),
+
+        .add_with_overflow => func.airAddSubWithOverflow(inst, .add),
+        .sub_with_overflow => func.airAddSubWithOverflow(inst, .sub),
+        .shl_with_overflow => func.airShlWithOverflow(inst),
+        .mul_with_overflow => func.airMulWithOverflow(inst),
+
+        .clz => func.airClz(inst),
+        .ctz => func.airCtz(inst),
+
+        .cmp_eq => func.airCmp(inst, .eq),
+        .cmp_gte => func.airCmp(inst, .gte),
+        .cmp_gt => func.airCmp(inst, .gt),
+        .cmp_lte => func.airCmp(inst, .lte),
+        .cmp_lt => func.airCmp(inst, .lt),
+        .cmp_neq => func.airCmp(inst, .neq),
+
+        .cmp_vector => func.airCmpVector(inst),
+        .cmp_lt_errors_len => func.airCmpLtErrorsLen(inst),
+
+        .array_elem_val => func.airArrayElemVal(inst),
+        .array_to_slice => func.airArrayToSlice(inst),
+        .alloc => func.airAlloc(inst),
+        .arg => func.airArg(inst),
+        .bitcast => func.airBitcast(inst),
+        .block => func.airBlock(inst),
+        .breakpoint => func.airBreakpoint(inst),
+        .br => func.airBr(inst),
+        .bool_to_int => func.airBoolToInt(inst),
+        .cond_br => func.airCondBr(inst),
+        .intcast => func.airIntcast(inst),
+        .fptrunc => func.airFptrunc(inst),
+        .fpext => func.airFpext(inst),
+        .float_to_int => func.airFloatToInt(inst),
+        .int_to_float => func.airIntToFloat(inst),
+        .get_union_tag => func.airGetUnionTag(inst),
+
+        .@"try" => func.airTry(inst),
+        .try_ptr => func.airTryPtr(inst),
 
         // TODO
         .dbg_inline_begin,
         .dbg_inline_end,
         .dbg_block_begin,
         .dbg_block_end,
-        => self.finishAir(inst, .none, &.{}),
-
-        .dbg_var_ptr => self.airDbgVar(inst, true),
-        .dbg_var_val => self.airDbgVar(inst, false),
-
-        .dbg_stmt => self.airDbgStmt(inst),
-
-        .call => self.airCall(inst, .auto),
-        .call_always_tail => self.airCall(inst, .always_tail),
-        .call_never_tail => self.airCall(inst, .never_tail),
-        .call_never_inline => self.airCall(inst, .never_inline),
-
-        .is_err => self.airIsErr(inst, .i32_ne),
-        .is_non_err => self.airIsErr(inst, .i32_eq),
-
-        .is_null => self.airIsNull(inst, .i32_eq, .value),
-        .is_non_null => self.airIsNull(inst, .i32_ne, .value),
-        .is_null_ptr => self.airIsNull(inst, .i32_eq, .ptr),
-        .is_non_null_ptr => self.airIsNull(inst, .i32_ne, .ptr),
-
-        .load => self.airLoad(inst),
-        .loop => self.airLoop(inst),
-        .memset => self.airMemset(inst),
-        .not => self.airNot(inst),
-        .optional_payload => self.airOptionalPayload(inst),
-        .optional_payload_ptr => self.airOptionalPayloadPtr(inst),
-        .optional_payload_ptr_set => self.airOptionalPayloadPtrSet(inst),
-        .ptr_add => self.airPtrBinOp(inst, .add),
-        .ptr_sub => self.airPtrBinOp(inst, .sub),
-        .ptr_elem_ptr => self.airPtrElemPtr(inst),
-        .ptr_elem_val => self.airPtrElemVal(inst),
-        .ptrtoint => self.airPtrToInt(inst),
-        .ret => self.airRet(inst),
-        .ret_ptr => self.airRetPtr(inst),
-        .ret_load => self.airRetLoad(inst),
-        .splat => self.airSplat(inst),
-        .select => self.airSelect(inst),
-        .shuffle => self.airShuffle(inst),
-        .reduce => self.airReduce(inst),
-        .aggregate_init => self.airAggregateInit(inst),
-        .union_init => self.airUnionInit(inst),
-        .prefetch => self.airPrefetch(inst),
-        .popcount => self.airPopcount(inst),
-        .byte_swap => self.airByteSwap(inst),
-
-        .slice => self.airSlice(inst),
-        .slice_len => self.airSliceLen(inst),
-        .slice_elem_val => self.airSliceElemVal(inst),
-        .slice_elem_ptr => self.airSliceElemPtr(inst),
-        .slice_ptr => self.airSlicePtr(inst),
-        .ptr_slice_len_ptr => self.airPtrSliceFieldPtr(inst, self.ptrSize()),
-        .ptr_slice_ptr_ptr => self.airPtrSliceFieldPtr(inst, 0),
-        .store => self.airStore(inst),
-
-        .set_union_tag => self.airSetUnionTag(inst),
-        .struct_field_ptr => self.airStructFieldPtr(inst),
-        .struct_field_ptr_index_0 => self.airStructFieldPtrIndex(inst, 0),
-        .struct_field_ptr_index_1 => self.airStructFieldPtrIndex(inst, 1),
-        .struct_field_ptr_index_2 => self.airStructFieldPtrIndex(inst, 2),
-        .struct_field_ptr_index_3 => self.airStructFieldPtrIndex(inst, 3),
-        .struct_field_val => self.airStructFieldVal(inst),
-        .field_parent_ptr => self.airFieldParentPtr(inst),
-
-        .switch_br => self.airSwitchBr(inst),
-        .trunc => self.airTrunc(inst),
-        .unreach => self.airUnreachable(inst),
-
-        .wrap_optional => self.airWrapOptional(inst),
-        .unwrap_errunion_payload => self.airUnwrapErrUnionPayload(inst, false),
-        .unwrap_errunion_payload_ptr => self.airUnwrapErrUnionPayload(inst, true),
-        .unwrap_errunion_err => self.airUnwrapErrUnionError(inst, false),
-        .unwrap_errunion_err_ptr => self.airUnwrapErrUnionError(inst, true),
-        .wrap_errunion_payload => self.airWrapErrUnionPayload(inst),
-        .wrap_errunion_err => self.airWrapErrUnionErr(inst),
-        .errunion_payload_ptr_set => self.airErrUnionPayloadPtrSet(inst),
-        .error_name => self.airErrorName(inst),
-
-        .wasm_memory_size => self.airWasmMemorySize(inst),
-        .wasm_memory_grow => self.airWasmMemoryGrow(inst),
-
-        .memcpy => self.airMemcpy(inst),
+        => func.finishAir(inst, .none, &.{}),
+
+        .dbg_var_ptr => func.airDbgVar(inst, true),
+        .dbg_var_val => func.airDbgVar(inst, false),
+
+        .dbg_stmt => func.airDbgStmt(inst),
+
+        .call => func.airCall(inst, .auto),
+        .call_always_tail => func.airCall(inst, .always_tail),
+        .call_never_tail => func.airCall(inst, .never_tail),
+        .call_never_inline => func.airCall(inst, .never_inline),
+
+        .is_err => func.airIsErr(inst, .i32_ne),
+        .is_non_err => func.airIsErr(inst, .i32_eq),
+
+        .is_null => func.airIsNull(inst, .i32_eq, .value),
+        .is_non_null => func.airIsNull(inst, .i32_ne, .value),
+        .is_null_ptr => func.airIsNull(inst, .i32_eq, .ptr),
+        .is_non_null_ptr => func.airIsNull(inst, .i32_ne, .ptr),
+
+        .load => func.airLoad(inst),
+        .loop => func.airLoop(inst),
+        .memset => func.airMemset(inst),
+        .not => func.airNot(inst),
+        .optional_payload => func.airOptionalPayload(inst),
+        .optional_payload_ptr => func.airOptionalPayloadPtr(inst),
+        .optional_payload_ptr_set => func.airOptionalPayloadPtrSet(inst),
+        .ptr_add => func.airPtrBinOp(inst, .add),
+        .ptr_sub => func.airPtrBinOp(inst, .sub),
+        .ptr_elem_ptr => func.airPtrElemPtr(inst),
+        .ptr_elem_val => func.airPtrElemVal(inst),
+        .ptrtoint => func.airPtrToInt(inst),
+        .ret => func.airRet(inst),
+        .ret_ptr => func.airRetPtr(inst),
+        .ret_load => func.airRetLoad(inst),
+        .splat => func.airSplat(inst),
+        .select => func.airSelect(inst),
+        .shuffle => func.airShuffle(inst),
+        .reduce => func.airReduce(inst),
+        .aggregate_init => func.airAggregateInit(inst),
+        .union_init => func.airUnionInit(inst),
+        .prefetch => func.airPrefetch(inst),
+        .popcount => func.airPopcount(inst),
+        .byte_swap => func.airByteSwap(inst),
+
+        .slice => func.airSlice(inst),
+        .slice_len => func.airSliceLen(inst),
+        .slice_elem_val => func.airSliceElemVal(inst),
+        .slice_elem_ptr => func.airSliceElemPtr(inst),
+        .slice_ptr => func.airSlicePtr(inst),
+        .ptr_slice_len_ptr => func.airPtrSliceFieldPtr(inst, func.ptrSize()),
+        .ptr_slice_ptr_ptr => func.airPtrSliceFieldPtr(inst, 0),
+        .store => func.airStore(inst),
+
+        .set_union_tag => func.airSetUnionTag(inst),
+        .struct_field_ptr => func.airStructFieldPtr(inst),
+        .struct_field_ptr_index_0 => func.airStructFieldPtrIndex(inst, 0),
+        .struct_field_ptr_index_1 => func.airStructFieldPtrIndex(inst, 1),
+        .struct_field_ptr_index_2 => func.airStructFieldPtrIndex(inst, 2),
+        .struct_field_ptr_index_3 => func.airStructFieldPtrIndex(inst, 3),
+        .struct_field_val => func.airStructFieldVal(inst),
+        .field_parent_ptr => func.airFieldParentPtr(inst),
+
+        .switch_br => func.airSwitchBr(inst),
+        .trunc => func.airTrunc(inst),
+        .unreach => func.airUnreachable(inst),
+
+        .wrap_optional => func.airWrapOptional(inst),
+        .unwrap_errunion_payload => func.airUnwrapErrUnionPayload(inst, false),
+        .unwrap_errunion_payload_ptr => func.airUnwrapErrUnionPayload(inst, true),
+        .unwrap_errunion_err => func.airUnwrapErrUnionError(inst, false),
+        .unwrap_errunion_err_ptr => func.airUnwrapErrUnionError(inst, true),
+        .wrap_errunion_payload => func.airWrapErrUnionPayload(inst),
+        .wrap_errunion_err => func.airWrapErrUnionErr(inst),
+        .errunion_payload_ptr_set => func.airErrUnionPayloadPtrSet(inst),
+        .error_name => func.airErrorName(inst),
+
+        .wasm_memory_size => func.airWasmMemorySize(inst),
+        .wasm_memory_grow => func.airWasmMemoryGrow(inst),
+
+        .memcpy => func.airMemcpy(inst),
 
         .mul_sat,
         .mod,
@@ -1862,7 +1860,7 @@ fn genInst(self: *Self, inst: Air.Inst.Index) InnerError!void {
         .is_named_enum_value,
         .error_set_has_value,
         .addrspace_cast,
-        => |tag| return self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
+        => |tag| return func.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}),
 
         .add_optimized,
         .addwrap_optimized,
@@ -1886,116 +1884,116 @@ fn genInst(self: *Self, inst: Air.Inst.Index) InnerError!void {
         .cmp_vector_optimized,
         .reduce_optimized,
         .float_to_int_optimized,
-        => return self.fail("TODO implement optimized float mode", .{}),
+        => return func.fail("TODO implement optimized float mode", .{}),
     };
 }
 
-fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
+fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
     for (body) |inst| {
-        const old_bookkeeping_value = self.air_bookkeeping;
+        const old_bookkeeping_value = func.air_bookkeeping;
         // TODO: Determine why we need to pre-allocate an extra 4 possible values here.
-        try self.currentBranch().values.ensureUnusedCapacity(self.gpa, Liveness.bpi + 4);
-        try self.genInst(inst);
+        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, Liveness.bpi + 4);
+        try func.genInst(inst);
 
-        if (builtin.mode == .Debug and self.air_bookkeeping < old_bookkeeping_value + 1) {
+        if (builtin.mode == .Debug and func.air_bookkeeping < old_bookkeeping_value + 1) {
             std.debug.panic("Missing call to `finishAir` in AIR instruction %{d} ('{}')", .{
                 inst,
-                self.air.instructions.items(.tag)[inst],
+                func.air.instructions.items(.tag)[inst],
             });
         }
     }
 }
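
// A minimal sketch (an assumption, not part of this commit) of the contract the
// Debug check above enforces: every `airXxx` handler is expected to end in a
// call to `finishAir`, which presumably bumps the counter roughly like so:
//
//   fn finishAir(func: *CodeGen, inst: Air.Inst.Index, result: WValue, operands: []const Air.Inst.Ref) void {
//       // ...record `result` for `inst`, process operand deaths...
//       func.air_bookkeeping += 1; // assumption: incremented once per handled instruction
//   }
//
// A handler that returns without calling it trips the panic in `genBody`.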
 
-fn airRet(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const operand = try self.resolveInst(un_op);
-    const fn_info = self.decl.ty.fnInfo();
+fn airRet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    const operand = try func.resolveInst(un_op);
+    const fn_info = func.decl.ty.fnInfo();
     const ret_ty = fn_info.return_type;
 
    // The result must be stored on the stack and we return a pointer
    // to that stack slot instead.
-    if (self.return_value != .none) {
-        try self.store(self.return_value, operand, ret_ty, 0);
+    if (func.return_value != .none) {
+        try func.store(func.return_value, operand, ret_ty, 0);
     } else if (fn_info.cc == .C and ret_ty.hasRuntimeBitsIgnoreComptime()) {
         switch (ret_ty.zigTypeTag()) {
             // Aggregate types can be lowered as a singular value
             .Struct, .Union => {
-                const scalar_type = abi.scalarType(ret_ty, self.target);
-                try self.emitWValue(operand);
+                const scalar_type = abi.scalarType(ret_ty, func.target);
+                try func.emitWValue(operand);
                 const opcode = buildOpcode(.{
                     .op = .load,
-                    .width = @intCast(u8, scalar_type.abiSize(self.target) * 8),
+                    .width = @intCast(u8, scalar_type.abiSize(func.target) * 8),
                     .signedness = if (scalar_type.isSignedInt()) .signed else .unsigned,
-                    .valtype1 = typeToValtype(scalar_type, self.target),
+                    .valtype1 = typeToValtype(scalar_type, func.target),
                 });
-                try self.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
+                try func.addMemArg(Mir.Inst.Tag.fromOpcode(opcode), .{
                     .offset = operand.offset(),
-                    .alignment = scalar_type.abiAlignment(self.target),
+                    .alignment = scalar_type.abiAlignment(func.target),
                 });
             },
-            else => try self.emitWValue(operand),
+            else => try func.emitWValue(operand),
         }
     } else {
         if (!ret_ty.hasRuntimeBitsIgnoreComptime() and ret_ty.isError()) {
-            try self.addImm32(0);
+            try func.addImm32(0);
         } else {
-            try self.emitWValue(operand);
+            try func.emitWValue(operand);
         }
     }
-    try self.restoreStackPointer();
-    try self.addTag(.@"return");
+    try func.restoreStackPointer();
+    try func.addTag(.@"return");
 
-    self.finishAir(inst, .none, &.{un_op});
+    func.finishAir(inst, .none, &.{un_op});
 }
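
// Illustrative note (an assumption, not taken from this commit): for a C-ABI
// return of a small aggregate such as
//
//   extern struct { a: u16, b: u16 }
//
// `abi.scalarType` is expected to select a 32-bit integer, so the branch above
// returns the aggregate as a single `i32.load` of its 4 bytes instead of
// returning it by reference.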
 
-fn airRetPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const child_type = self.air.typeOfIndex(inst).childType();
+fn airRetPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const child_type = func.air.typeOfIndex(inst).childType();
 
     var result = result: {
         if (!child_type.isFnOrHasRuntimeBitsIgnoreComptime()) {
-            break :result try self.allocStack(Type.usize); // create pointer to void
+            break :result try func.allocStack(Type.usize); // create pointer to void
         }
 
-        const fn_info = self.decl.ty.fnInfo();
-        if (firstParamSRet(fn_info.cc, fn_info.return_type, self.target)) {
-            break :result self.return_value;
+        const fn_info = func.decl.ty.fnInfo();
+        if (firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
+            break :result func.return_value;
         }
 
-        break :result try self.allocStackPtr(inst);
+        break :result try func.allocStackPtr(inst);
     };
 
-    self.finishAir(inst, result, &.{});
+    func.finishAir(inst, result, &.{});
 }
 
-fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const operand = try self.resolveInst(un_op);
-    const ret_ty = self.air.typeOf(un_op).childType();
+fn airRetLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    const operand = try func.resolveInst(un_op);
+    const ret_ty = func.air.typeOf(un_op).childType();
     if (!ret_ty.hasRuntimeBitsIgnoreComptime()) {
         if (ret_ty.isError()) {
-            try self.addImm32(0);
+            try func.addImm32(0);
         } else {
-            return self.finishAir(inst, .none, &.{});
+            return func.finishAir(inst, .none, &.{});
         }
     }
 
-    const fn_info = self.decl.ty.fnInfo();
-    if (!firstParamSRet(fn_info.cc, fn_info.return_type, self.target)) {
+    const fn_info = func.decl.ty.fnInfo();
+    if (!firstParamSRet(fn_info.cc, fn_info.return_type, func.target)) {
         // leave on the stack
-        _ = try self.load(operand, ret_ty, 0);
+        _ = try func.load(operand, ret_ty, 0);
     }
 
-    try self.restoreStackPointer();
-    try self.addTag(.@"return");
-    return self.finishAir(inst, .none, &.{});
+    try func.restoreStackPointer();
+    try func.addTag(.@"return");
+    return func.finishAir(inst, .none, &.{});
 }
 
-fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.Modifier) InnerError!void {
-    if (modifier == .always_tail) return self.fail("TODO implement tail calls for wasm", .{});
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const extra = self.air.extraData(Air.Call, pl_op.payload);
-    const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
-    const ty = self.air.typeOf(pl_op.operand);
+fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.Modifier) InnerError!void {
+    if (modifier == .always_tail) return func.fail("TODO implement tail calls for wasm", .{});
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const extra = func.air.extraData(Air.Call, pl_op.payload);
+    const args = @ptrCast([]const Air.Inst.Ref, func.air.extra[extra.end..][0..extra.data.args_len]);
+    const ty = func.air.typeOf(pl_op.operand);
 
     const fn_ty = switch (ty.zigTypeTag()) {
         .Fn => ty,
@@ -2004,21 +2002,21 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
     };
     const ret_ty = fn_ty.fnReturnType();
     const fn_info = fn_ty.fnInfo();
-    const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, self.target);
+    const first_param_sret = firstParamSRet(fn_info.cc, fn_info.return_type, func.target);
 
     const callee: ?*Decl = blk: {
-        const func_val = self.air.value(pl_op.operand) orelse break :blk null;
-        const module = self.bin_file.base.options.module.?;
+        const func_val = func.air.value(pl_op.operand) orelse break :blk null;
+        const module = func.bin_file.base.options.module.?;
 
-        if (func_val.castTag(.function)) |func| {
-            break :blk module.declPtr(func.data.owner_decl);
+        if (func_val.castTag(.function)) |function| {
+            break :blk module.declPtr(function.data.owner_decl);
         } else if (func_val.castTag(.extern_fn)) |extern_fn| {
             const ext_decl = module.declPtr(extern_fn.data.owner_decl);
             const ext_info = ext_decl.ty.fnInfo();
-            var func_type = try genFunctype(self.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, self.target);
-            defer func_type.deinit(self.gpa);
-            ext_decl.fn_link.wasm.type_index = try self.bin_file.putOrGetFuncType(func_type);
-            try self.bin_file.addOrUpdateImport(
+            var func_type = try genFunctype(func.gpa, ext_info.cc, ext_info.param_types, ext_info.return_type, func.target);
+            defer func_type.deinit(func.gpa);
+            ext_decl.fn_link.wasm.type_index = try func.bin_file.putOrGetFuncType(func_type);
+            try func.bin_file.addOrUpdateImport(
                 mem.sliceTo(ext_decl.name, 0),
                 ext_decl.link.wasm.sym_index,
                 ext_decl.getExternFn().?.lib_name,
@@ -2028,151 +2026,151 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
         } else if (func_val.castTag(.decl_ref)) |decl_ref| {
             break :blk module.declPtr(decl_ref.data);
         }
-        return self.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
+        return func.fail("Expected a function, but instead found type '{}'", .{func_val.tag()});
     };
 
     const sret = if (first_param_sret) blk: {
-        const sret_local = try self.allocStack(ret_ty);
-        try self.lowerToStack(sret_local);
+        const sret_local = try func.allocStack(ret_ty);
+        try func.lowerToStack(sret_local);
         break :blk sret_local;
     } else WValue{ .none = {} };
 
     for (args) |arg| {
-        const arg_val = try self.resolveInst(arg);
+        const arg_val = try func.resolveInst(arg);
 
-        const arg_ty = self.air.typeOf(arg);
+        const arg_ty = func.air.typeOf(arg);
         if (!arg_ty.hasRuntimeBitsIgnoreComptime()) continue;
 
-        try self.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
+        try func.lowerArg(fn_ty.fnInfo().cc, arg_ty, arg_val);
     }
 
     if (callee) |direct| {
-        try self.addLabel(.call, direct.link.wasm.sym_index);
+        try func.addLabel(.call, direct.link.wasm.sym_index);
     } else {
         // in this case we call a function pointer
         // so load its value onto the stack
         std.debug.assert(ty.zigTypeTag() == .Pointer);
-        const operand = try self.resolveInst(pl_op.operand);
-        try self.emitWValue(operand);
+        const operand = try func.resolveInst(pl_op.operand);
+        try func.emitWValue(operand);
 
-        var fn_type = try genFunctype(self.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, self.target);
-        defer fn_type.deinit(self.gpa);
+        var fn_type = try genFunctype(func.gpa, fn_info.cc, fn_info.param_types, fn_info.return_type, func.target);
+        defer fn_type.deinit(func.gpa);
 
-        const fn_type_index = try self.bin_file.putOrGetFuncType(fn_type);
-        try self.addLabel(.call_indirect, fn_type_index);
+        const fn_type_index = try func.bin_file.putOrGetFuncType(fn_type);
+        try func.addLabel(.call_indirect, fn_type_index);
     }
 
     const result_value = result_value: {
-        if (self.liveness.isUnused(inst) or (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError())) {
+        if (func.liveness.isUnused(inst) or (!ret_ty.hasRuntimeBitsIgnoreComptime() and !ret_ty.isError())) {
             break :result_value WValue{ .none = {} };
         } else if (ret_ty.isNoReturn()) {
-            try self.addTag(.@"unreachable");
+            try func.addTag(.@"unreachable");
             break :result_value WValue{ .none = {} };
         } else if (first_param_sret) {
             break :result_value sret;
             // TODO: Make this less fragile and optimize
         } else if (fn_ty.fnInfo().cc == .C and ret_ty.zigTypeTag() == .Struct or ret_ty.zigTypeTag() == .Union) {
-            const result_local = try self.allocLocal(ret_ty);
-            try self.addLabel(.local_set, result_local.local.value);
-            const scalar_type = abi.scalarType(ret_ty, self.target);
-            const result = try self.allocStack(scalar_type);
-            try self.store(result, result_local, scalar_type, 0);
+            const result_local = try func.allocLocal(ret_ty);
+            try func.addLabel(.local_set, result_local.local.value);
+            const scalar_type = abi.scalarType(ret_ty, func.target);
+            const result = try func.allocStack(scalar_type);
+            try func.store(result, result_local, scalar_type, 0);
             break :result_value result;
         } else {
-            const result_local = try self.allocLocal(ret_ty);
-            try self.addLabel(.local_set, result_local.local.value);
+            const result_local = try func.allocLocal(ret_ty);
+            try func.addLabel(.local_set, result_local.local.value);
             break :result_value result_local;
         }
     };
 
-    var bt = try self.iterateBigTomb(inst, 1 + args.len);
+    var bt = try func.iterateBigTomb(inst, 1 + args.len);
     bt.feed(pl_op.operand);
     for (args) |arg| bt.feed(arg);
     return bt.finishAir(result_value);
 }
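
// A hedged sketch of the sret convention handled above (names illustrative):
// when `firstParamSRet` is true, the caller allocates the result slot itself
// and passes its address as a hidden leading argument, conceptually:
//
//   // source:         const big = returnsBigStruct();
//   // lowered as if:  var slot: BigStruct = undefined;
//   //                 returnsBigStruct(&slot); // callee writes through the pointer
//
// which is why `sret` is lowered to the stack before any explicit arguments,
// and why the call's result in that branch is simply `sret` itself.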
 
-fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const value = try self.allocStackPtr(inst);
-    self.finishAir(inst, value, &.{});
+fn airAlloc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const value = try func.allocStackPtr(inst);
+    func.finishAir(inst, value, &.{});
 }
 
-fn airStore(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+fn airStore(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
 
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const ty = self.air.typeOf(bin_op.lhs).childType();
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = func.air.typeOf(bin_op.lhs).childType();
 
-    try self.store(lhs, rhs, ty, 0);
-    self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    try func.store(lhs, rhs, ty, 0);
+    func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
+fn store(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerError!void {
     assert(!(lhs != .stack and rhs == .stack));
     switch (ty.zigTypeTag()) {
         .ErrorUnion => {
             const pl_ty = ty.errorUnionPayload();
             if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
-                return self.store(lhs, rhs, Type.anyerror, 0);
+                return func.store(lhs, rhs, Type.anyerror, 0);
             }
 
-            const len = @intCast(u32, ty.abiSize(self.target));
-            return self.memcpy(lhs, rhs, .{ .imm32 = len });
+            const len = @intCast(u32, ty.abiSize(func.target));
+            return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Optional => {
             if (ty.isPtrLikeOptional()) {
-                return self.store(lhs, rhs, Type.usize, 0);
+                return func.store(lhs, rhs, Type.usize, 0);
             }
             var buf: Type.Payload.ElemType = undefined;
             const pl_ty = ty.optionalChild(&buf);
             if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
-                return self.store(lhs, rhs, Type.u8, 0);
+                return func.store(lhs, rhs, Type.u8, 0);
             }
             if (pl_ty.zigTypeTag() == .ErrorSet) {
-                return self.store(lhs, rhs, Type.anyerror, 0);
+                return func.store(lhs, rhs, Type.anyerror, 0);
             }
 
-            const len = @intCast(u32, ty.abiSize(self.target));
-            return self.memcpy(lhs, rhs, .{ .imm32 = len });
+            const len = @intCast(u32, ty.abiSize(func.target));
+            return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Struct, .Array, .Union, .Vector => {
-            const len = @intCast(u32, ty.abiSize(self.target));
-            return self.memcpy(lhs, rhs, .{ .imm32 = len });
+            const len = @intCast(u32, ty.abiSize(func.target));
+            return func.memcpy(lhs, rhs, .{ .imm32 = len });
         },
         .Pointer => {
             if (ty.isSlice()) {
                 // store pointer first
                 // lower it to the stack so we do not have to store rhs into a local first
-                try self.emitWValue(lhs);
-                const ptr_local = try self.load(rhs, Type.usize, 0);
-                try self.store(.{ .stack = {} }, ptr_local, Type.usize, 0 + lhs.offset());
+                try func.emitWValue(lhs);
+                const ptr_local = try func.load(rhs, Type.usize, 0);
+                try func.store(.{ .stack = {} }, ptr_local, Type.usize, 0 + lhs.offset());
 
                 // retrieve length from rhs, and store that alongside lhs as well
-                try self.emitWValue(lhs);
-                const len_local = try self.load(rhs, Type.usize, self.ptrSize());
-                try self.store(.{ .stack = {} }, len_local, Type.usize, self.ptrSize() + lhs.offset());
+                try func.emitWValue(lhs);
+                const len_local = try func.load(rhs, Type.usize, func.ptrSize());
+                try func.store(.{ .stack = {} }, len_local, Type.usize, func.ptrSize() + lhs.offset());
                 return;
             }
         },
-        .Int => if (ty.intInfo(self.target).bits > 64) {
-            try self.emitWValue(lhs);
-            const lsb = try self.load(rhs, Type.u64, 0);
-            try self.store(.{ .stack = {} }, lsb, Type.u64, 0 + lhs.offset());
-
-            try self.emitWValue(lhs);
-            const msb = try self.load(rhs, Type.u64, 8);
-            try self.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
+        .Int => if (ty.intInfo(func.target).bits > 64) {
+            try func.emitWValue(lhs);
+            const lsb = try func.load(rhs, Type.u64, 0);
+            try func.store(.{ .stack = {} }, lsb, Type.u64, 0 + lhs.offset());
+
+            try func.emitWValue(lhs);
+            const msb = try func.load(rhs, Type.u64, 8);
+            try func.store(.{ .stack = {} }, msb, Type.u64, 8 + lhs.offset());
             return;
         },
         else => {},
     }
-    try self.emitWValue(lhs);
+    try func.emitWValue(lhs);
     // In this case we're actually interested in storing the stack position
     // into lhs, so we calculate that and emit that instead
-    try self.lowerToStack(rhs);
+    try func.lowerToStack(rhs);
 
-    const valtype = typeToValtype(ty, self.target);
-    const abi_size = @intCast(u8, ty.abiSize(self.target));
+    const valtype = typeToValtype(ty, func.target);
+    const abi_size = @intCast(u8, ty.abiSize(func.target));
 
     const opcode = buildOpcode(.{
         .valtype1 = valtype,
@@ -2181,64 +2179,64 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
     });
 
     // store rhs value at stack pointer's location in memory
-    try self.addMemArg(
+    try func.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
-        .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(self.target) },
+        .{ .offset = offset + lhs.offset(), .alignment = ty.abiAlignment(func.target) },
     );
 }
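
// Illustrative example (assumed encoding, not part of this commit): storing a
// `u32` held in local 2 through a pointer in local 1 at byte offset 4 takes
// the scalar fallthrough above and emits roughly:
//
//   local.get 1              ;; emitWValue(lhs)
//   local.get 2              ;; lowerToStack(rhs)
//   i32.store offset=4 align=4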
 
-fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const operand = try self.resolveInst(ty_op.operand);
-    const ty = self.air.getRefType(ty_op.ty);
+fn airLoad(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    const operand = try func.resolveInst(ty_op.operand);
+    const ty = func.air.getRefType(ty_op.ty);
 
-    if (!ty.hasRuntimeBitsIgnoreComptime()) return self.finishAir(inst, .none, &.{ty_op.operand});
+    if (!ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{ty_op.operand});
 
     const result = result: {
-        if (isByRef(ty, self.target)) {
-            const new_local = try self.allocStack(ty);
-            try self.store(new_local, operand, ty, 0);
+        if (isByRef(ty, func.target)) {
+            const new_local = try func.allocStack(ty);
+            try func.store(new_local, operand, ty, 0);
             break :result new_local;
         }
 
-        const stack_loaded = try self.load(operand, ty, 0);
-        break :result try stack_loaded.toLocal(self, ty);
+        const stack_loaded = try func.load(operand, ty, 0);
+        break :result try stack_loaded.toLocal(func, ty);
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Loads an operand from the linear memory section.
 /// NOTE: Leaves the value on the stack.
-fn load(self: *Self, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
+fn load(func: *CodeGen, operand: WValue, ty: Type, offset: u32) InnerError!WValue {
     // load local's value from memory by its stack position
-    try self.emitWValue(operand);
+    try func.emitWValue(operand);
 
-    const abi_size = @intCast(u8, ty.abiSize(self.target));
+    const abi_size = @intCast(u8, ty.abiSize(func.target));
     const opcode = buildOpcode(.{
-        .valtype1 = typeToValtype(ty, self.target),
+        .valtype1 = typeToValtype(ty, func.target),
         .width = abi_size * 8,
         .op = .load,
         .signedness = .unsigned,
     });
 
-    try self.addMemArg(
+    try func.addMemArg(
         Mir.Inst.Tag.fromOpcode(opcode),
-        .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(self.target) },
+        .{ .offset = offset + operand.offset(), .alignment = ty.abiAlignment(func.target) },
     );
 
     return WValue{ .stack = {} };
 }
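
// Usage note: `load` leaves its result on the wasm value stack, so callers
// that need the value afterwards promote it into a local first, as `airLoad`
// does above:
//
//   const stack_loaded = try func.load(operand, ty, 0);
//   const result = try stack_loaded.toLocal(func, ty); // spills `.stack` into a local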
 
-fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const arg_index = self.arg_index;
-    const arg = self.args[arg_index];
-    const cc = self.decl.ty.fnInfo().cc;
-    const arg_ty = self.air.typeOfIndex(inst);
+fn airArg(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const arg_index = func.arg_index;
+    const arg = func.args[arg_index];
+    const cc = func.decl.ty.fnInfo().cc;
+    const arg_ty = func.air.typeOfIndex(inst);
     if (cc == .C) {
-        const arg_classes = abi.classifyType(arg_ty, self.target);
+        const arg_classes = abi.classifyType(arg_ty, func.target);
         for (arg_classes) |class| {
             if (class != .none) {
-                self.arg_index += 1;
+                func.arg_index += 1;
             }
         }
 
@@ -2246,24 +2244,24 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
         // we combine them into a single stack value
         if (arg_classes[0] == .direct and arg_classes[1] == .direct) {
             if (arg_ty.zigTypeTag() != .Int) {
-                return self.fail(
+                return func.fail(
                     "TODO: Implement C-ABI argument for type '{}'",
-                    .{arg_ty.fmt(self.bin_file.base.options.module.?)},
+                    .{arg_ty.fmt(func.bin_file.base.options.module.?)},
                 );
             }
-            const result = try self.allocStack(arg_ty);
-            try self.store(result, arg, Type.u64, 0);
-            try self.store(result, self.args[arg_index + 1], Type.u64, 8);
-            return self.finishAir(inst, arg, &.{});
+            const result = try func.allocStack(arg_ty);
+            try func.store(result, arg, Type.u64, 0);
+            try func.store(result, func.args[arg_index + 1], Type.u64, 8);
+            return func.finishAir(inst, arg, &.{});
         }
     } else {
-        self.arg_index += 1;
+        func.arg_index += 1;
     }
 
-    switch (self.debug_output) {
+    switch (func.debug_output) {
         .dwarf => |dwarf| {
             // TODO: Get the original arg index rather than wasm arg index
-            const name = self.mod_fn.getParamName(self.bin_file.base.options.module.?, arg_index);
+            const name = func.mod_fn.getParamName(func.bin_file.base.options.module.?, arg_index);
             const leb_size = link.File.Wasm.getULEB128Size(arg.local.value);
             const dbg_info = &dwarf.dbg_info;
             try dbg_info.ensureUnusedCapacity(3 + leb_size + 5 + name.len + 1);
@@ -2279,196 +2277,196 @@ fn airArg(self: *Self, inst: Air.Inst.Index) InnerError!void {
                 std.dwarf.OP.WASM_local,
             });
             leb.writeULEB128(dbg_info.writer(), arg.local.value) catch unreachable;
-            try self.addDbgInfoTypeReloc(arg_ty);
+            try func.addDbgInfoTypeReloc(arg_ty);
             dbg_info.appendSliceAssumeCapacity(name);
             dbg_info.appendAssumeCapacity(0);
         },
         else => {},
     }
 
-    self.finishAir(inst, arg, &.{});
+    func.finishAir(inst, arg, &.{});
 }
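
// Illustrative note (an assumption): a C-ABI `u128` argument is classified as
// two `direct` classes and therefore arrives as two i64 wasm parameters; the
// branch above stitches parameters `arg_index` and `arg_index + 1` back into a
// single 16-byte stack value (one word at offset 0, the other at offset 8).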
 
-fn airBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const ty = self.air.typeOf(bin_op.lhs);
+fn airBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = func.air.typeOf(bin_op.lhs);
 
-    const stack_value = try self.binOp(lhs, rhs, ty, op);
-    self.finishAir(inst, try stack_value.toLocal(self, ty), &.{ bin_op.lhs, bin_op.rhs });
+    const stack_value = try func.binOp(lhs, rhs, ty, op);
+    func.finishAir(inst, try stack_value.toLocal(func, ty), &.{ bin_op.lhs, bin_op.rhs });
 }
 
/// Performs a binary operation on the given `WValue`s.
/// NOTE: This leaves the value on top of the stack.
-fn binOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+fn binOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
     assert(!(lhs != .stack and rhs == .stack));
-    if (isByRef(ty, self.target)) {
+    if (isByRef(ty, func.target)) {
         if (ty.zigTypeTag() == .Int) {
-            return self.binOpBigInt(lhs, rhs, ty, op);
+            return func.binOpBigInt(lhs, rhs, ty, op);
         } else {
-            return self.fail(
+            return func.fail(
                 "TODO: Implement binary operation for type: {}",
-                .{ty.fmt(self.bin_file.base.options.module.?)},
+                .{ty.fmt(func.bin_file.base.options.module.?)},
             );
         }
     }
 
-    if (ty.isAnyFloat() and ty.floatBits(self.target) == 16) {
-        return self.binOpFloat16(lhs, rhs, op);
+    if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
+        return func.binOpFloat16(lhs, rhs, op);
     }
 
     const opcode: wasm.Opcode = buildOpcode(.{
         .op = op,
-        .valtype1 = typeToValtype(ty, self.target),
+        .valtype1 = typeToValtype(ty, func.target),
         .signedness = if (ty.isSignedInt()) .signed else .unsigned,
     });
-    try self.emitWValue(lhs);
-    try self.emitWValue(rhs);
+    try func.emitWValue(lhs);
+    try func.emitWValue(rhs);
 
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return WValue{ .stack = {} };
 }
 
 /// Performs a binary operation for 16-bit floats.
 /// NOTE: Leaves the result value on the stack
-fn binOpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: Op) InnerError!WValue {
+fn binOpFloat16(func: *CodeGen, lhs: WValue, rhs: WValue, op: Op) InnerError!WValue {
     const opcode: wasm.Opcode = buildOpcode(.{ .op = op, .valtype1 = .f32, .signedness = .unsigned });
-    _ = try self.fpext(lhs, Type.f16, Type.f32);
-    _ = try self.fpext(rhs, Type.f16, Type.f32);
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    _ = try func.fpext(lhs, Type.f16, Type.f32);
+    _ = try func.fpext(rhs, Type.f16, Type.f32);
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
-    return self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
+    return func.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
 }
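
// Hedged rationale: wasm has no 16-bit float type, so the helper widens both
// operands to f32, applies the f32 opcode, and narrows the result back.
// Conceptually (where `binop` stands for the selected f32 operation):
//
//   const result: f16 = @floatCast(f16, binop(@floatCast(f32, lhs), @floatCast(f32, rhs)));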
 
-fn binOpBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    if (ty.intInfo(self.target).bits > 128) {
-        return self.fail("TODO: Implement binary operation for big integer", .{});
+fn binOpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+    if (ty.intInfo(func.target).bits > 128) {
+        return func.fail("TODO: Implement binary operation for big integer", .{});
     }
 
     if (op != .add and op != .sub) {
-        return self.fail("TODO: Implement binary operation for big integers", .{});
+        return func.fail("TODO: Implement binary operation for big integers", .{});
     }
 
-    const result = try self.allocStack(ty);
-    var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
-    defer lhs_high_bit.free(self);
-    var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
-    defer rhs_high_bit.free(self);
-    var high_op_res = try (try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(self, Type.u64);
-    defer high_op_res.free(self);
+    const result = try func.allocStack(ty);
+    var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
+    defer lhs_high_bit.free(func);
+    var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
+    defer rhs_high_bit.free(func);
+    var high_op_res = try (try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(func, Type.u64);
+    defer high_op_res.free(func);
 
-    const lhs_low_bit = try self.load(lhs, Type.u64, 8);
-    const rhs_low_bit = try self.load(rhs, Type.u64, 8);
-    const low_op_res = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
+    const lhs_low_bit = try func.load(lhs, Type.u64, 8);
+    const rhs_low_bit = try func.load(rhs, Type.u64, 8);
+    const low_op_res = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op);
 
     const lt = if (op == .add) blk: {
-        break :blk try self.cmp(high_op_res, rhs_high_bit, Type.u64, .lt);
+        break :blk try func.cmp(high_op_res, rhs_high_bit, Type.u64, .lt);
     } else if (op == .sub) blk: {
-        break :blk try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
+        break :blk try func.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt);
     } else unreachable;
-    const tmp = try self.intcast(lt, Type.u32, Type.u64);
-    var tmp_op = try (try self.binOp(low_op_res, tmp, Type.u64, op)).toLocal(self, Type.u64);
-    defer tmp_op.free(self);
+    const tmp = try func.intcast(lt, Type.u32, Type.u64);
+    var tmp_op = try (try func.binOp(low_op_res, tmp, Type.u64, op)).toLocal(func, Type.u64);
+    defer tmp_op.free(func);
 
-    try self.store(result, high_op_res, Type.u64, 0);
-    try self.store(result, tmp_op, Type.u64, 8);
+    try func.store(result, high_op_res, Type.u64, 0);
+    try func.store(result, tmp_op, Type.u64, 8);
     return result;
 }
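
// Worked sketch of the carry propagation above, scaled down to u8 halves for
// readability (illustrative only):
//
//   const a_lo: u8 = 0xFF; const a_hi: u8 = 0x00;
//   const b_lo: u8 = 0x02; const b_hi: u8 = 0x00;
//   const sum_lo = a_lo +% b_lo;                  // 0x01, the add wrapped
//   const carry: u8 = @boolToInt(sum_lo < b_lo);  // 1, detected via unsigned `lt`
//   const sum_hi = a_hi +% b_hi +% carry;         // 0x01, giving 0x0101 overall
//
// The same `lt` comparison drives the borrow for `sub`, comparing the original
// operands instead of the wrapped sum.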
 
-fn airWrapBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airWrapBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const ty = self.air.typeOf(bin_op.lhs);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const ty = func.air.typeOf(bin_op.lhs);
 
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: Implement wrapping arithmetic for vectors", .{});
+        return func.fail("TODO: Implement wrapping arithmetic for vectors", .{});
     }
 
-    const result = try (try self.wrapBinOp(lhs, rhs, ty, op)).toLocal(self, ty);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result = try (try func.wrapBinOp(lhs, rhs, ty, op)).toLocal(func, ty);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Performs a wrapping binary operation.
/// Asserts `rhs` is not a stack value when `lhs` is not a stack value.
/// NOTE: Leaves the result on the stack when the type is <= 64 bits.
-fn wrapBinOp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
-    const bin_local = try self.binOp(lhs, rhs, ty, op);
-    return self.wrapOperand(bin_local, ty);
+fn wrapBinOp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: Op) InnerError!WValue {
+    const bin_local = try func.binOp(lhs, rhs, ty, op);
+    return func.wrapOperand(bin_local, ty);
 }
 
 /// Wraps an operand based on a given type's bitsize.
 /// Asserts `Type` is <= 128 bits.
/// NOTE: When the type is <= 64 bits, leaves the value on top of the stack.
-fn wrapOperand(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
-    assert(ty.abiSize(self.target) <= 16);
-    const bitsize = ty.intInfo(self.target).bits;
+fn wrapOperand(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
+    assert(ty.abiSize(func.target) <= 16);
+    const bitsize = ty.intInfo(func.target).bits;
     const wasm_bits = toWasmBits(bitsize) orelse {
-        return self.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
+        return func.fail("TODO: Implement wrapOperand for bitsize '{d}'", .{bitsize});
     };
 
     if (wasm_bits == bitsize) return operand;
 
     if (wasm_bits == 128) {
         assert(operand != .stack);
-        const lsb = try self.load(operand, Type.u64, 8);
+        const lsb = try func.load(operand, Type.u64, 8);
 
-        const result_ptr = try self.allocStack(ty);
-        try self.emitWValue(result_ptr);
-        try self.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
+        const result_ptr = try func.allocStack(ty);
+        try func.emitWValue(result_ptr);
+        try func.store(.{ .stack = {} }, lsb, Type.u64, 8 + result_ptr.offset());
         const result = (@as(u64, 1) << @intCast(u6, 64 - (wasm_bits - bitsize))) - 1;
-        try self.emitWValue(result_ptr);
-        _ = try self.load(operand, Type.u64, 0);
-        try self.addImm64(result);
-        try self.addTag(.i64_and);
-        try self.addMemArg(.i64_store, .{ .offset = result_ptr.offset(), .alignment = 8 });
+        try func.emitWValue(result_ptr);
+        _ = try func.load(operand, Type.u64, 0);
+        try func.addImm64(result);
+        try func.addTag(.i64_and);
+        try func.addMemArg(.i64_store, .{ .offset = result_ptr.offset(), .alignment = 8 });
         return result_ptr;
     }
 
     const result = (@as(u64, 1) << @intCast(u6, bitsize)) - 1;
-    try self.emitWValue(operand);
+    try func.emitWValue(operand);
     if (bitsize <= 32) {
-        try self.addImm32(@bitCast(i32, @intCast(u32, result)));
-        try self.addTag(.i32_and);
+        try func.addImm32(@bitCast(i32, @intCast(u32, result)));
+        try func.addTag(.i32_and);
     } else if (bitsize <= 64) {
-        try self.addImm64(result);
-        try self.addTag(.i64_and);
+        try func.addImm64(result);
+        try func.addTag(.i64_and);
     } else unreachable;
 
     return WValue{ .stack = {} };
 }
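
// Worked example of the masking above (illustrative): wrapping a `u5` that
// lives in a 32-bit wasm local computes the mask (1 << 5) - 1 = 0x1f and
// emits `i32.const 0x1f` followed by `i32.and`, e.g.:
//
//   0b0011_0101 & 0x1f == 0b0001_0101 // the upper 27 bits are discarded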
 
-fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue {
+fn lowerParentPtr(func: *CodeGen, ptr_val: Value, ptr_child_ty: Type) InnerError!WValue {
     switch (ptr_val.tag()) {
         .decl_ref_mut => {
             const decl_index = ptr_val.castTag(.decl_ref_mut).?.data.decl_index;
-            return self.lowerParentPtrDecl(ptr_val, decl_index);
+            return func.lowerParentPtrDecl(ptr_val, decl_index);
         },
         .decl_ref => {
             const decl_index = ptr_val.castTag(.decl_ref).?.data;
-            return self.lowerParentPtrDecl(ptr_val, decl_index);
+            return func.lowerParentPtrDecl(ptr_val, decl_index);
         },
         .variable => {
             const decl_index = ptr_val.castTag(.variable).?.data.owner_decl;
-            return self.lowerParentPtrDecl(ptr_val, decl_index);
+            return func.lowerParentPtrDecl(ptr_val, decl_index);
         },
         .field_ptr => {
             const field_ptr = ptr_val.castTag(.field_ptr).?.data;
             const parent_ty = field_ptr.container_ty;
-            const parent_ptr = try self.lowerParentPtr(field_ptr.container_ptr, parent_ty);
+            const parent_ptr = try func.lowerParentPtr(field_ptr.container_ptr, parent_ty);
 
             const offset = switch (parent_ty.zigTypeTag()) {
                 .Struct => blk: {
-                    const offset = parent_ty.structFieldOffset(field_ptr.field_index, self.target);
+                    const offset = parent_ty.structFieldOffset(field_ptr.field_index, func.target);
                     break :blk offset;
                 },
                 .Union => blk: {
-                    const layout: Module.Union.Layout = parent_ty.unionGetLayout(self.target);
+                    const layout: Module.Union.Layout = parent_ty.unionGetLayout(func.target);
                     if (layout.payload_size == 0) break :blk 0;
                     if (layout.payload_align > layout.tag_align) break :blk 0;
 
@@ -2479,7 +2477,7 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV
                 .Pointer => switch (parent_ty.ptrSize()) {
                     .Slice => switch (field_ptr.field_index) {
                         0 => 0,
-                        1 => self.ptrSize(),
+                        1 => func.ptrSize(),
                         else => unreachable,
                     },
                     else => unreachable,
@@ -2506,8 +2504,8 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV
         .elem_ptr => {
             const elem_ptr = ptr_val.castTag(.elem_ptr).?.data;
             const index = elem_ptr.index;
-            const offset = index * ptr_child_ty.abiSize(self.target);
-            const array_ptr = try self.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
+            const offset = index * ptr_child_ty.abiSize(func.target);
+            const array_ptr = try func.lowerParentPtr(elem_ptr.array_ptr, elem_ptr.elem_ty);
 
             return WValue{ .memory_offset = .{
                 .pointer = array_ptr.memory,
@@ -2516,27 +2514,27 @@ fn lowerParentPtr(self: *Self, ptr_val: Value, ptr_child_ty: Type) InnerError!WV
         },
         .opt_payload_ptr => {
             const payload_ptr = ptr_val.castTag(.opt_payload_ptr).?.data;
-            const parent_ptr = try self.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
+            const parent_ptr = try func.lowerParentPtr(payload_ptr.container_ptr, payload_ptr.container_ty);
             var buf: Type.Payload.ElemType = undefined;
             const payload_ty = payload_ptr.container_ty.optionalChild(&buf);
             if (!payload_ty.hasRuntimeBitsIgnoreComptime() or payload_ty.optionalReprIsPayload()) {
                 return parent_ptr;
             }
 
-            const abi_size = payload_ptr.container_ty.abiSize(self.target);
-            const offset = abi_size - payload_ty.abiSize(self.target);
+            const abi_size = payload_ptr.container_ty.abiSize(func.target);
+            const offset = abi_size - payload_ty.abiSize(func.target);
 
             return WValue{ .memory_offset = .{
                 .pointer = parent_ptr.memory,
                 .offset = @intCast(u32, offset),
             } };
         },
-        else => |tag| return self.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}),
+        else => |tag| return func.fail("TODO: Implement lowerParentPtr for tag: {}", .{tag}),
     }
 }
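
// Hedged worked example: for a global `s: extern struct { a: u32, b: u32 }`,
// lowering the constant `&s.b` hits the `.field_ptr` case above, recurses to
// the decl pointer for `s`, and adds `structFieldOffset(1, target) == 4`,
// presumably yielding a `memory_offset` of `{ .pointer = <s's symbol>, .offset = 4 }`,
// analogous to the `.elem_ptr` case shown.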
 
-fn lowerParentPtrDecl(self: *Self, ptr_val: Value, decl_index: Module.Decl.Index) InnerError!WValue {
-    const module = self.bin_file.base.options.module.?;
+fn lowerParentPtrDecl(func: *CodeGen, ptr_val: Value, decl_index: Module.Decl.Index) InnerError!WValue {
+    const module = func.bin_file.base.options.module.?;
     const decl = module.declPtr(decl_index);
     module.markDeclAlive(decl);
     var ptr_ty_payload: Type.Payload.ElemType = .{
@@ -2544,15 +2542,15 @@ fn lowerParentPtrDecl(self: *Self, ptr_val: Value, decl_index: Module.Decl.Index
         .data = decl.ty,
     };
     const ptr_ty = Type.initPayload(&ptr_ty_payload.base);
-    return self.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
+    return func.lowerDeclRefValue(.{ .ty = ptr_ty, .val = ptr_val }, decl_index);
 }
 
-fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!WValue {
+fn lowerDeclRefValue(func: *CodeGen, tv: TypedValue, decl_index: Module.Decl.Index) InnerError!WValue {
     if (tv.ty.isSlice()) {
-        return WValue{ .memory = try self.bin_file.lowerUnnamedConst(tv, decl_index) };
+        return WValue{ .memory = try func.bin_file.lowerUnnamedConst(tv, decl_index) };
     }
 
-    const module = self.bin_file.base.options.module.?;
+    const module = func.bin_file.base.options.module.?;
     const decl = module.declPtr(decl_index);
     if (decl.ty.zigTypeTag() != .Fn and !decl.ty.hasRuntimeBitsIgnoreComptime()) {
         return WValue{ .imm32 = 0xaaaaaaaa };
@@ -2562,7 +2560,7 @@ fn lowerDeclRefValue(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index)
 
     const target_sym_index = decl.link.wasm.sym_index;
     if (decl.ty.zigTypeTag() == .Fn) {
-        try self.bin_file.addTableFunction(target_sym_index);
+        try func.bin_file.addTableFunction(target_sym_index);
         return WValue{ .function_index = target_sym_index };
     } else return WValue{ .memory = target_sym_index };
 }
@@ -2583,21 +2581,21 @@ fn toTwosComplement(value: anytype, bits: u7) std.meta.Int(.unsigned, @typeInfo(
     return @intCast(WantedT, result);
 }
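
// Hedged example (the body of `toTwosComplement` is only partially visible in
// this hunk): `toTwosComplement(@as(i8, -1), 5)` is expected to return 0b11111
// (0x1f), the 5-bit two's-complement encoding of -1.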
 
-fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
-    if (val.isUndefDeep()) return self.emitUndefined(ty);
+fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
+    if (val.isUndefDeep()) return func.emitUndefined(ty);
     if (val.castTag(.decl_ref)) |decl_ref| {
         const decl_index = decl_ref.data;
-        return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
+        return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
     }
     if (val.castTag(.decl_ref_mut)) |decl_ref_mut| {
         const decl_index = decl_ref_mut.data.decl_index;
-        return self.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
+        return func.lowerDeclRefValue(.{ .ty = ty, .val = val }, decl_index);
     }
-    const target = self.target;
+    const target = func.target;
     switch (ty.zigTypeTag()) {
         .Void => return WValue{ .none = {} },
         .Int => {
-            const int_info = ty.intInfo(self.target);
+            const int_info = ty.intInfo(func.target);
             switch (int_info.signedness) {
                 .signed => switch (int_info.bits) {
                     0...32 => return WValue{ .imm32 = @intCast(u32, toTwosComplement(
@@ -2618,7 +2616,7 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
             }
         },
         .Bool => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
-        .Float => switch (ty.floatBits(self.target)) {
+        .Float => switch (ty.floatBits(func.target)) {
             16 => return WValue{ .imm32 = @bitCast(u16, val.toFloat(f16)) },
             32 => return WValue{ .float32 = val.toFloat(f32) },
             64 => return WValue{ .float64 = val.toFloat(f64) },
@@ -2626,11 +2624,11 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
         },
         .Pointer => switch (val.tag()) {
             .field_ptr, .elem_ptr, .opt_payload_ptr => {
-                return self.lowerParentPtr(val, ty.childType());
+                return func.lowerParentPtr(val, ty.childType());
             },
             .int_u64, .one => return WValue{ .imm32 = @intCast(u32, val.toUnsignedInt(target)) },
             .zero, .null_value => return WValue{ .imm32 = 0 },
-            else => return self.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
+            else => return func.fail("Wasm TODO: lowerConstant for other const pointer tag {}", .{val.tag()}),
         },
         .Enum => {
             if (val.castTag(.enum_field_index)) |field_index| {
@@ -2640,7 +2638,7 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
                         const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
                         if (enum_full.values.count() != 0) {
                             const tag_val = enum_full.values.keys()[field_index.data];
-                            return self.lowerConstant(tag_val, enum_full.tag_ty);
+                            return func.lowerConstant(tag_val, enum_full.tag_ty);
                         } else {
                             return WValue{ .imm32 = field_index.data };
                         }
@@ -2649,19 +2647,19 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
                         const index = field_index.data;
                         const enum_data = ty.castTag(.enum_numbered).?.data;
                         const enum_val = enum_data.values.keys()[index];
-                        return self.lowerConstant(enum_val, enum_data.tag_ty);
+                        return func.lowerConstant(enum_val, enum_data.tag_ty);
                     },
-                    else => return self.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
+                    else => return func.fail("TODO: lowerConstant for enum tag: {}", .{ty.tag()}),
                 }
             } else {
                 var int_tag_buffer: Type.Payload.Bits = undefined;
                 const int_tag_ty = ty.intTagType(&int_tag_buffer);
-                return self.lowerConstant(val, int_tag_ty);
+                return func.lowerConstant(val, int_tag_ty);
             }
         },
         .ErrorSet => switch (val.tag()) {
             .@"error" => {
-                const kv = try self.bin_file.base.options.module.?.getErrorValue(val.getError().?);
+                const kv = try func.bin_file.base.options.module.?.getErrorValue(val.getError().?);
                 return WValue{ .imm32 = kv.value };
             },
             else => return WValue{ .imm32 = 0 },
@@ -2670,41 +2668,41 @@ fn lowerConstant(self: *Self, val: Value, ty: Type) InnerError!WValue {
             const error_type = ty.errorUnionSet();
             const is_pl = val.errorUnionIsPayload();
             const err_val = if (!is_pl) val else Value.initTag(.zero);
-            return self.lowerConstant(err_val, error_type);
+            return func.lowerConstant(err_val, error_type);
         },
         .Optional => if (ty.optionalReprIsPayload()) {
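+            // e.g. for `?*T` the null case is represented as the payload value 0, so no separate tag is stored.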
             var buf: Type.Payload.ElemType = undefined;
             const pl_ty = ty.optionalChild(&buf);
             if (val.castTag(.opt_payload)) |payload| {
-                return self.lowerConstant(payload.data, pl_ty);
+                return func.lowerConstant(payload.data, pl_ty);
             } else if (val.isNull()) {
                 return WValue{ .imm32 = 0 };
             } else {
-                return self.lowerConstant(val, pl_ty);
+                return func.lowerConstant(val, pl_ty);
             }
         } else {
             const is_pl = val.tag() == .opt_payload;
             return WValue{ .imm32 = if (is_pl) @as(u32, 1) else 0 };
         },
-        else => |zig_type| return self.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
+        else => |zig_type| return func.fail("Wasm TODO: LowerConstant for zigTypeTag {}", .{zig_type}),
     }
 }
 
-fn emitUndefined(self: *Self, ty: Type) InnerError!WValue {
+fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
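+    // Undefined values are lowered as the repeating 0xAA byte pattern, making uninitialized data easy to spot when debugging.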
     switch (ty.zigTypeTag()) {
         .Bool, .ErrorSet => return WValue{ .imm32 = 0xaaaaaaaa },
-        .Int => switch (ty.intInfo(self.target).bits) {
+        .Int => switch (ty.intInfo(func.target).bits) {
             0...32 => return WValue{ .imm32 = 0xaaaaaaaa },
             33...64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa },
             else => unreachable,
         },
-        .Float => switch (ty.floatBits(self.target)) {
+        .Float => switch (ty.floatBits(func.target)) {
             16 => return WValue{ .imm32 = 0xaaaaaaaa },
             32 => return WValue{ .float32 = @bitCast(f32, @as(u32, 0xaaaaaaaa)) },
             64 => return WValue{ .float64 = @bitCast(f64, @as(u64, 0xaaaaaaaaaaaaaaaa)) },
             else => unreachable,
         },
-        .Pointer => switch (self.arch()) {
+        .Pointer => switch (func.arch()) {
             .wasm32 => return WValue{ .imm32 = 0xaaaaaaaa },
             .wasm64 => return WValue{ .imm64 = 0xaaaaaaaaaaaaaaaa },
             else => unreachable,
@@ -2713,22 +2711,22 @@ fn emitUndefined(self: *Self, ty: Type) InnerError!WValue {
             var buf: Type.Payload.ElemType = undefined;
             const pl_ty = ty.optionalChild(&buf);
             if (ty.optionalReprIsPayload()) {
-                return self.emitUndefined(pl_ty);
+                return func.emitUndefined(pl_ty);
             }
             return WValue{ .imm32 = 0xaaaaaaaa };
         },
         .ErrorUnion => {
             return WValue{ .imm32 = 0xaaaaaaaa };
         },
-        else => return self.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}),
+        else => return func.fail("Wasm TODO: emitUndefined for type: {}\n", .{ty.zigTypeTag()}),
     }
 }
 
 /// Returns a `Value` as a signed 32 bit value.
 /// It's illegal to provide a value with a type that cannot be represented
 /// as an integer value.
-fn valueAsI32(self: Self, val: Value, ty: Type) i32 {
-    const target = self.target;
+fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
+    const target = func.target;
     switch (ty.zigTypeTag()) {
         .Enum => {
             if (val.castTag(.enum_field_index)) |field_index| {
@@ -2738,28 +2736,28 @@ fn valueAsI32(self: Self, val: Value, ty: Type) i32 {
                         const enum_full = ty.cast(Type.Payload.EnumFull).?.data;
                         if (enum_full.values.count() != 0) {
                             const tag_val = enum_full.values.keys()[field_index.data];
-                            return self.valueAsI32(tag_val, enum_full.tag_ty);
+                            return func.valueAsI32(tag_val, enum_full.tag_ty);
                         } else return @bitCast(i32, field_index.data);
                     },
                     .enum_numbered => {
                         const index = field_index.data;
                         const enum_data = ty.castTag(.enum_numbered).?.data;
-                        return self.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty);
+                        return func.valueAsI32(enum_data.values.keys()[index], enum_data.tag_ty);
                     },
                     else => unreachable,
                 }
             } else {
                 var int_tag_buffer: Type.Payload.Bits = undefined;
                 const int_tag_ty = ty.intTagType(&int_tag_buffer);
-                return self.valueAsI32(val, int_tag_ty);
+                return func.valueAsI32(val, int_tag_ty);
             }
         },
-        .Int => switch (ty.intInfo(self.target).signedness) {
+        .Int => switch (ty.intInfo(func.target).signedness) {
             .signed => return @truncate(i32, val.toSignedInt()),
             .unsigned => return @bitCast(i32, @truncate(u32, val.toUnsignedInt(target))),
         },
         .ErrorSet => {
-            const kv = self.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
+            const kv = func.bin_file.base.options.module.?.getErrorValue(val.getError().?) catch unreachable; // passed invalid `Value` to function
             return @bitCast(i32, kv.value);
         },
         .Bool => return @intCast(i32, val.toSignedInt()),
@@ -2768,139 +2766,139 @@ fn valueAsI32(self: Self, val: Value, ty: Type) i32 {
     }
 }
 
-fn airBlock(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const block_ty = self.air.getRefType(ty_pl.ty);
-    const wasm_block_ty = genBlockType(block_ty, self.target);
-    const extra = self.air.extraData(Air.Block, ty_pl.payload);
-    const body = self.air.extra[extra.end..][0..extra.data.body_len];
+fn airBlock(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const block_ty = func.air.getRefType(ty_pl.ty);
+    const wasm_block_ty = genBlockType(block_ty, func.target);
+    const extra = func.air.extraData(Air.Block, ty_pl.payload);
+    const body = func.air.extra[extra.end..][0..extra.data.body_len];
 
     // if wasm_block_ty is non-empty, we create a register to store the temporary value
     const block_result: WValue = if (wasm_block_ty != wasm.block_empty) blk: {
-        const ty: Type = if (isByRef(block_ty, self.target)) Type.u32 else block_ty;
-        break :blk try self.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
+        const ty: Type = if (isByRef(block_ty, func.target)) Type.u32 else block_ty;
+        break :blk try func.ensureAllocLocal(ty); // make sure it's a clean local as it may never get overwritten
     } else WValue.none;
 
-    try self.startBlock(.block, wasm.block_empty);
+    try func.startBlock(.block, wasm.block_empty);
     // Here we set the current block idx, so breaks know the depth to jump
     // to when breaking out.
-    try self.blocks.putNoClobber(self.gpa, inst, .{
-        .label = self.block_depth,
+    try func.blocks.putNoClobber(func.gpa, inst, .{
+        .label = func.block_depth,
         .value = block_result,
     });
-    try self.genBody(body);
-    try self.endBlock();
+    try func.genBody(body);
+    try func.endBlock();
 
-    self.finishAir(inst, block_result, &.{});
+    func.finishAir(inst, block_result, &.{});
 }
 
 /// Appends a new wasm block to the code section and increases the `block_depth` by 1
-fn startBlock(self: *Self, block_tag: wasm.Opcode, valtype: u8) !void {
-    self.block_depth += 1;
-    try self.addInst(.{
+fn startBlock(func: *CodeGen, block_tag: wasm.Opcode, valtype: u8) !void {
+    func.block_depth += 1;
+    try func.addInst(.{
         .tag = Mir.Inst.Tag.fromOpcode(block_tag),
         .data = .{ .block_type = valtype },
     });
 }
 
 /// Ends the current wasm block and decreases the `block_depth` by 1
-fn endBlock(self: *Self) !void {
-    try self.addTag(.end);
-    self.block_depth -= 1;
+fn endBlock(func: *CodeGen) !void {
+    try func.addTag(.end);
+    func.block_depth -= 1;
 }
 
-fn airLoop(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const loop = self.air.extraData(Air.Block, ty_pl.payload);
-    const body = self.air.extra[loop.end..][0..loop.data.body_len];
+fn airLoop(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const loop = func.air.extraData(Air.Block, ty_pl.payload);
+    const body = func.air.extra[loop.end..][0..loop.data.body_len];
 
     // result type of loop is always 'noreturn', meaning we can always
     // emit the wasm type 'block_empty'.
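+    // The emitted code has the following shape (a sketch in wat notation):
+    //   loop          ;; label 0
+    //     <body>
+    //     br 0        ;; branching to a loop's label repeats the loop
+    //   end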
-    try self.startBlock(.loop, wasm.block_empty);
-    try self.genBody(body);
+    try func.startBlock(.loop, wasm.block_empty);
+    try func.genBody(body);
 
     // breaking to the index of a loop block will continue the loop instead
-    try self.addLabel(.br, 0);
-    try self.endBlock();
+    try func.addLabel(.br, 0);
+    try func.endBlock();
 
-    self.finishAir(inst, .none, &.{});
+    func.finishAir(inst, .none, &.{});
 }
 
-fn airCondBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const condition = try self.resolveInst(pl_op.operand);
-    const extra = self.air.extraData(Air.CondBr, pl_op.payload);
-    const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
-    const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
-    const liveness_condbr = self.liveness.getCondBr(inst);
+fn airCondBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const condition = try func.resolveInst(pl_op.operand);
+    const extra = func.air.extraData(Air.CondBr, pl_op.payload);
+    const then_body = func.air.extra[extra.end..][0..extra.data.then_body_len];
+    const else_body = func.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+    const liveness_condbr = func.liveness.getCondBr(inst);
 
     // result type is always noreturn, so use `block_empty` as type.
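+    // Emitted shape (a sketch in wat notation):
+    //   block
+    //     <condition>
+    //     br_if 0     ;; condition is true: skip the else body
+    //     <else body> ;; noreturn, so it never falls through
+    //   end
+    //   <then body>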
-    try self.startBlock(.block, wasm.block_empty);
+    try func.startBlock(.block, wasm.block_empty);
     // emit the conditional value
-    try self.emitWValue(condition);
+    try func.emitWValue(condition);
 
     // we inserted the block in front of the condition,
     // so now check it: when the condition is true we branch out of this
     // block, skipping the else body, and continue with the then codepath
-    try self.addLabel(.br_if, 0);
+    try func.addLabel(.br_if, 0);
 
-    try self.branches.ensureUnusedCapacity(self.gpa, 2);
+    try func.branches.ensureUnusedCapacity(func.gpa, 2);
 
-    self.branches.appendAssumeCapacity(.{});
-    try self.currentBranch().values.ensureUnusedCapacity(self.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
+    func.branches.appendAssumeCapacity(.{});
+    try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.else_deaths.len));
     for (liveness_condbr.else_deaths) |death| {
-        self.processDeath(Air.indexToRef(death));
+        func.processDeath(Air.indexToRef(death));
     }
-    try self.genBody(else_body);
-    try self.endBlock();
-    var else_stack = self.branches.pop();
-    defer else_stack.deinit(self.gpa);
+    try func.genBody(else_body);
+    try func.endBlock();
+    var else_stack = func.branches.pop();
+    defer else_stack.deinit(func.gpa);
 
     // Outer block that matches the condition
-    self.branches.appendAssumeCapacity(.{});
-    try self.currentBranch().values.ensureUnusedCapacity(self.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
+    func.branches.appendAssumeCapacity(.{});
+    try func.currentBranch().values.ensureUnusedCapacity(func.gpa, @intCast(u32, liveness_condbr.then_deaths.len));
     for (liveness_condbr.then_deaths) |death| {
-        self.processDeath(Air.indexToRef(death));
+        func.processDeath(Air.indexToRef(death));
     }
-    try self.genBody(then_body);
-    var then_stack = self.branches.pop();
-    defer then_stack.deinit(self.gpa);
+    try func.genBody(then_body);
+    var then_stack = func.branches.pop();
+    defer then_stack.deinit(func.gpa);
 
-    try self.mergeBranch(&else_stack);
-    try self.mergeBranch(&then_stack);
+    try func.mergeBranch(&else_stack);
+    try func.mergeBranch(&then_stack);
 
-    self.finishAir(inst, .none, &.{});
+    func.finishAir(inst, .none, &.{});
 }
 
-fn mergeBranch(self: *Self, branch: *const Branch) !void {
-    const parent = self.currentBranch();
+fn mergeBranch(func: *CodeGen, branch: *const Branch) !void {
+    const parent = func.currentBranch();
 
     const target_slice = branch.values.entries.slice();
     const target_keys = target_slice.items(.key);
     const target_values = target_slice.items(.value);
 
-    try parent.values.ensureUnusedCapacity(self.gpa, branch.values.count());
+    try parent.values.ensureUnusedCapacity(func.gpa, branch.values.count());
     for (target_keys) |key, index| {
         // TODO: process deaths from branches
         parent.values.putAssumeCapacity(key, target_values[index]);
     }
 }
 
-fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airCmp(func: *CodeGen, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const operand_ty = self.air.typeOf(bin_op.lhs);
-    const result = try (try self.cmp(lhs, rhs, operand_ty, op)).toLocal(self, Type.u32); // comparison result is always 32 bits
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const operand_ty = func.air.typeOf(bin_op.lhs);
+    const result = try (try func.cmp(lhs, rhs, operand_ty, op)).toLocal(func, Type.u32); // comparison result is always 32 bits
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Compares two operands.
 /// Asserts rhs is not a stack value when the lhs isn't a stack value either
 /// NOTE: This leaves the result on top of the stack, rather than a new local.
-fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+fn cmp(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     assert(!(lhs != .stack and rhs == .stack));
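+    // If rhs already lives on the wasm value stack, lhs must too; otherwise
+    // emitting lhs now would place it on top of rhs and flip the operand order.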
     if (ty.zigTypeTag() == .Optional and !ty.optionalReprIsPayload()) {
         var buf: Type.Payload.ElemType = undefined;
@@ -2909,28 +2907,28 @@ fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOper
             // When we hit this case, we must check the value of optionals
             // that are not pointers. This means first checking both lhs and rhs
             // against non-null, and then checking that their payloads match.
-            return self.cmpOptionals(lhs, rhs, ty, op);
+            return func.cmpOptionals(lhs, rhs, ty, op);
         }
-    } else if (isByRef(ty, self.target)) {
-        return self.cmpBigInt(lhs, rhs, ty, op);
-    } else if (ty.isAnyFloat() and ty.floatBits(self.target) == 16) {
-        return self.cmpFloat16(lhs, rhs, op);
+    } else if (isByRef(ty, func.target)) {
+        return func.cmpBigInt(lhs, rhs, ty, op);
+    } else if (ty.isAnyFloat() and ty.floatBits(func.target) == 16) {
+        return func.cmpFloat16(lhs, rhs, op);
     }
 
     // ensure that when we compare pointers, we emit
     // the true pointer of a stack value, rather than the stack pointer.
-    try self.lowerToStack(lhs);
-    try self.lowerToStack(rhs);
+    try func.lowerToStack(lhs);
+    try func.lowerToStack(rhs);
 
     const signedness: std.builtin.Signedness = blk: {
         // by default we treat the operand type as unsigned (i.e. bools and enum values)
         if (ty.zigTypeTag() != .Int) break :blk .unsigned;
 
         // in case of an actual integer, we emit the correct signedness
-        break :blk ty.intInfo(self.target).signedness;
+        break :blk ty.intInfo(func.target).signedness;
     };
     const opcode: wasm.Opcode = buildOpcode(.{
-        .valtype1 = typeToValtype(ty, self.target),
+        .valtype1 = typeToValtype(ty, func.target),
         .op = switch (op) {
             .lt => .lt,
             .lte => .le,
@@ -2941,14 +2939,14 @@ fn cmp(self: *Self, lhs: WValue, rhs: WValue, ty: Type, op: std.math.CompareOper
         },
         .signedness = signedness,
     });
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return WValue{ .stack = {} };
 }
 
 /// Compares 16-bit floats
 /// NOTE: The result value remains on top of the stack.
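+/// Wasm has no native 16-bit float opcodes, so both operands are promoted to `f32` first.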
-fn cmpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: std.math.CompareOperator) InnerError!WValue {
+fn cmpFloat16(func: *CodeGen, lhs: WValue, rhs: WValue, op: std.math.CompareOperator) InnerError!WValue {
     const opcode: wasm.Opcode = buildOpcode(.{
         .op = switch (op) {
             .lt => .lt,
@@ -2961,200 +2959,200 @@ fn cmpFloat16(self: *Self, lhs: WValue, rhs: WValue, op: std.math.CompareOperato
         .valtype1 = .f32,
         .signedness = .unsigned,
     });
-    _ = try self.fpext(lhs, Type.f16, Type.f32);
-    _ = try self.fpext(rhs, Type.f16, Type.f32);
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    _ = try func.fpext(lhs, Type.f16, Type.f32);
+    _ = try func.fpext(rhs, Type.f16, Type.f32);
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return WValue{ .stack = {} };
 }
 
-fn airCmpVector(self: *Self, inst: Air.Inst.Index) InnerError!void {
+fn airCmpVector(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     _ = inst;
-    return self.fail("TODO implement airCmpVector for wasm", .{});
+    return func.fail("TODO implement airCmpVector for wasm", .{});
 }
 
-fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const operand = try self.resolveInst(un_op);
+fn airCmpLtErrorsLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    const operand = try func.resolveInst(un_op);
 
     _ = operand;
-    return self.fail("TODO implement airCmpLtErrorsLen for wasm", .{});
+    return func.fail("TODO implement airCmpLtErrorsLen for wasm", .{});
 }
 
-fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const br = self.air.instructions.items(.data)[inst].br;
-    const block = self.blocks.get(br.block_inst).?;
+fn airBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const br = func.air.instructions.items(.data)[inst].br;
+    const block = func.blocks.get(br.block_inst).?;
 
     // if operand has codegen bits we should break with a value
-    if (self.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
-        const operand = try self.resolveInst(br.operand);
-        try self.lowerToStack(operand);
+    if (func.air.typeOf(br.operand).hasRuntimeBitsIgnoreComptime()) {
+        const operand = try func.resolveInst(br.operand);
+        try func.lowerToStack(operand);
 
         if (block.value != .none) {
-            try self.addLabel(.local_set, block.value.local.value);
+            try func.addLabel(.local_set, block.value.local.value);
         }
     }
 
     // We map every block to its block index.
     // The number of levels to branch through is the difference between
     // the current block depth and the target block's label.
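+    // e.g. at block_depth 5, a block labeled 3 is reached with 'br 2'.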
-    const idx: u32 = self.block_depth - block.label;
-    try self.addLabel(.br, idx);
+    const idx: u32 = func.block_depth - block.label;
+    try func.addLabel(.br, idx);
 
-    self.finishAir(inst, .none, &.{br.operand});
+    func.finishAir(inst, .none, &.{br.operand});
 }
 
-fn airNot(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airNot(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const operand_ty = self.air.typeOf(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
+    const operand_ty = func.air.typeOf(ty_op.operand);
 
     const result = result: {
         if (operand_ty.zigTypeTag() == .Bool) {
-            try self.emitWValue(operand);
-            try self.addTag(.i32_eqz);
-            const not_tmp = try self.allocLocal(operand_ty);
-            try self.addLabel(.local_set, not_tmp.local.value);
+            try func.emitWValue(operand);
+            try func.addTag(.i32_eqz);
+            const not_tmp = try func.allocLocal(operand_ty);
+            try func.addLabel(.local_set, not_tmp.local.value);
             break :result not_tmp;
         } else {
-            const operand_bits = operand_ty.intInfo(self.target).bits;
+            const operand_bits = operand_ty.intInfo(func.target).bits;
             const wasm_bits = toWasmBits(operand_bits) orelse {
-                return self.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
+                return func.fail("TODO: Implement binary NOT for integer with bitsize '{d}'", .{operand_bits});
             };
 
             switch (wasm_bits) {
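+                // Wasm has no NOT opcode; XOR with an all-ones mask of the operand's width flips every bit.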
                 32 => {
-                    const bin_op = try self.binOp(operand, .{ .imm32 = ~@as(u32, 0) }, operand_ty, .xor);
-                    break :result try (try self.wrapOperand(bin_op, operand_ty)).toLocal(self, operand_ty);
+                    const bin_op = try func.binOp(operand, .{ .imm32 = ~@as(u32, 0) }, operand_ty, .xor);
+                    break :result try (try func.wrapOperand(bin_op, operand_ty)).toLocal(func, operand_ty);
                 },
                 64 => {
-                    const bin_op = try self.binOp(operand, .{ .imm64 = ~@as(u64, 0) }, operand_ty, .xor);
-                    break :result try (try self.wrapOperand(bin_op, operand_ty)).toLocal(self, operand_ty);
+                    const bin_op = try func.binOp(operand, .{ .imm64 = ~@as(u64, 0) }, operand_ty, .xor);
+                    break :result try (try func.wrapOperand(bin_op, operand_ty)).toLocal(func, operand_ty);
                 },
                 128 => {
-                    const result_ptr = try self.allocStack(operand_ty);
-                    try self.emitWValue(result_ptr);
-                    const msb = try self.load(operand, Type.u64, 0);
-                    const msb_xor = try self.binOp(msb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
-                    try self.store(.{ .stack = {} }, msb_xor, Type.u64, 0 + result_ptr.offset());
-
-                    try self.emitWValue(result_ptr);
-                    const lsb = try self.load(operand, Type.u64, 8);
-                    const lsb_xor = try self.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
-                    try self.store(result_ptr, lsb_xor, Type.u64, 8 + result_ptr.offset());
+                    const result_ptr = try func.allocStack(operand_ty);
+                    try func.emitWValue(result_ptr);
+                    const msb = try func.load(operand, Type.u64, 0);
+                    const msb_xor = try func.binOp(msb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+                    try func.store(.{ .stack = {} }, msb_xor, Type.u64, 0 + result_ptr.offset());
+
+                    try func.emitWValue(result_ptr);
+                    const lsb = try func.load(operand, Type.u64, 8);
+                    const lsb_xor = try func.binOp(lsb, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+                    try func.store(result_ptr, lsb_xor, Type.u64, 8 + result_ptr.offset());
                     break :result result_ptr;
                 },
                 else => unreachable,
             }
         }
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airBreakpoint(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    // unsupported by wasm itself. Can be implemented once we support DWARF
+fn airBreakpoint(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    // unsupported by wasm itself. Can be implemented once we support DWARF
     // for wasm
-    self.finishAir(inst, .none, &.{});
+    func.finishAir(inst, .none, &.{});
 }
 
-fn airUnreachable(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    try self.addTag(.@"unreachable");
-    self.finishAir(inst, .none, &.{});
+fn airUnreachable(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    try func.addTag(.@"unreachable");
+    func.finishAir(inst, .none, &.{});
 }
 
-fn airBitcast(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const result = if (!self.liveness.isUnused(inst)) result: {
-        const operand = try self.resolveInst(ty_op.operand);
-        break :result self.reuseOperand(ty_op.operand, operand);
+fn airBitcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
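+    // A bitcast has no effect on the wasm-level representation, so the operand is simply reused as the result.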
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    const result = if (!func.liveness.isUnused(inst)) result: {
+        const operand = try func.resolveInst(ty_op.operand);
+        break :result func.reuseOperand(ty_op.operand, operand);
     } else WValue{ .none = {} };
-    self.finishAir(inst, result, &.{});
+    func.finishAir(inst, result, &.{});
 }
 
-fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.StructField, ty_pl.payload);
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{extra.data.struct_operand});
+fn airStructFieldPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.StructField, ty_pl.payload);
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.data.struct_operand});
 
-    const struct_ptr = try self.resolveInst(extra.data.struct_operand);
-    const struct_ty = self.air.typeOf(extra.data.struct_operand).childType();
-    const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, self.target)) orelse {
-        const module = self.bin_file.base.options.module.?;
-        return self.fail("Field type '{}' too big to fit into stack frame", .{
+    const struct_ptr = try func.resolveInst(extra.data.struct_operand);
+    const struct_ty = func.air.typeOf(extra.data.struct_operand).childType();
+    const offset = std.math.cast(u32, struct_ty.structFieldOffset(extra.data.field_index, func.target)) orelse {
+        const module = func.bin_file.base.options.module.?;
+        return func.fail("Field type '{}' too big to fit into stack frame", .{
             struct_ty.structFieldType(extra.data.field_index).fmt(module),
         });
     };
-    const result = try self.structFieldPtr(struct_ptr, offset);
-    self.finishAir(inst, result, &.{extra.data.struct_operand});
+    const result = try func.structFieldPtr(struct_ptr, offset);
+    func.finishAir(inst, result, &.{extra.data.struct_operand});
 }
 
-fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u32) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
-    const struct_ptr = try self.resolveInst(ty_op.operand);
-    const struct_ty = self.air.typeOf(ty_op.operand).childType();
+fn airStructFieldPtrIndex(func: *CodeGen, inst: Air.Inst.Index, index: u32) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    const struct_ptr = try func.resolveInst(ty_op.operand);
+    const struct_ty = func.air.typeOf(ty_op.operand).childType();
     const field_ty = struct_ty.structFieldType(index);
-    const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, self.target)) orelse {
-        const module = self.bin_file.base.options.module.?;
-        return self.fail("Field type '{}' too big to fit into stack frame", .{
+    const offset = std.math.cast(u32, struct_ty.structFieldOffset(index, func.target)) orelse {
+        const module = func.bin_file.base.options.module.?;
+        return func.fail("Field type '{}' too big to fit into stack frame", .{
             field_ty.fmt(module),
         });
     };
-    const result = try self.structFieldPtr(struct_ptr, offset);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const result = try func.structFieldPtr(struct_ptr, offset);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn structFieldPtr(self: *Self, struct_ptr: WValue, offset: u32) InnerError!WValue {
+fn structFieldPtr(func: *CodeGen, struct_ptr: WValue, offset: u32) InnerError!WValue {
     switch (struct_ptr) {
         .stack_offset => |stack_offset| {
             return WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
         },
-        else => return self.buildPointerOffset(struct_ptr, offset, .new),
+        else => return func.buildPointerOffset(struct_ptr, offset, .new),
     }
 }
 
-fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const struct_field = self.air.extraData(Air.StructField, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{struct_field.struct_operand});
+fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const struct_field = func.air.extraData(Air.StructField, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
 
-    const struct_ty = self.air.typeOf(struct_field.struct_operand);
-    const operand = try self.resolveInst(struct_field.struct_operand);
+    const struct_ty = func.air.typeOf(struct_field.struct_operand);
+    const operand = try func.resolveInst(struct_field.struct_operand);
     const field_index = struct_field.field_index;
     const field_ty = struct_ty.structFieldType(field_index);
-    if (!field_ty.hasRuntimeBitsIgnoreComptime()) return self.finishAir(inst, .none, &.{struct_field.struct_operand});
+    if (!field_ty.hasRuntimeBitsIgnoreComptime()) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
 
-    const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) orelse {
-        const module = self.bin_file.base.options.module.?;
-        return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
+    const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, func.target)) orelse {
+        const module = func.bin_file.base.options.module.?;
+        return func.fail("Field type '{}' too big to fit into stack frame", .{field_ty.fmt(module)});
     };
 
     const result = result: {
-        if (isByRef(field_ty, self.target)) {
+        if (isByRef(field_ty, func.target)) {
             switch (operand) {
                 .stack_offset => |stack_offset| {
                     break :result WValue{ .stack_offset = .{ .value = stack_offset.value + offset, .references = 1 } };
                 },
-                else => break :result try self.buildPointerOffset(operand, offset, .new),
+                else => break :result try func.buildPointerOffset(operand, offset, .new),
             }
         }
 
-        const field = try self.load(operand, field_ty, offset);
-        break :result try field.toLocal(self, field_ty);
+        const field = try func.load(operand, field_ty, offset);
+        break :result try field.toLocal(func, field_ty);
     };
-    self.finishAir(inst, result, &.{struct_field.struct_operand});
+    func.finishAir(inst, result, &.{struct_field.struct_operand});
 }
 
-fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
+fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
     // result type is always 'noreturn'
     const blocktype = wasm.block_empty;
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const target = try self.resolveInst(pl_op.operand);
-    const target_ty = self.air.typeOf(pl_op.operand);
-    const switch_br = self.air.extraData(Air.SwitchBr, pl_op.payload);
-    const liveness = try self.liveness.getSwitchBr(self.gpa, inst, switch_br.data.cases_len + 1);
-    defer self.gpa.free(liveness.deaths);
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const target = try func.resolveInst(pl_op.operand);
+    const target_ty = func.air.typeOf(pl_op.operand);
+    const switch_br = func.air.extraData(Air.SwitchBr, pl_op.payload);
+    const liveness = try func.liveness.getSwitchBr(func.gpa, inst, switch_br.data.cases_len + 1);
+    defer func.gpa.free(liveness.deaths);
 
     var extra_index: usize = switch_br.end;
     var case_i: u32 = 0;
@@ -3164,24 +3162,24 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
     var case_list = try std.ArrayList(struct {
         values: []const CaseValue,
         body: []const Air.Inst.Index,
-    }).initCapacity(self.gpa, switch_br.data.cases_len);
+    }).initCapacity(func.gpa, switch_br.data.cases_len);
     defer for (case_list.items) |case| {
-        self.gpa.free(case.values);
+        func.gpa.free(case.values);
     } else case_list.deinit();
 
     var lowest_maybe: ?i32 = null;
     var highest_maybe: ?i32 = null;
     while (case_i < switch_br.data.cases_len) : (case_i += 1) {
-        const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
-        const items = @ptrCast([]const Air.Inst.Ref, self.air.extra[case.end..][0..case.data.items_len]);
-        const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
+        const case = func.air.extraData(Air.SwitchBr.Case, extra_index);
+        const items = @ptrCast([]const Air.Inst.Ref, func.air.extra[case.end..][0..case.data.items_len]);
+        const case_body = func.air.extra[case.end + items.len ..][0..case.data.body_len];
         extra_index = case.end + items.len + case_body.len;
-        const values = try self.gpa.alloc(CaseValue, items.len);
-        errdefer self.gpa.free(values);
+        const values = try func.gpa.alloc(CaseValue, items.len);
+        errdefer func.gpa.free(values);
 
         for (items) |ref, i| {
-            const item_val = self.air.value(ref).?;
-            const int_val = self.valueAsI32(item_val, target_ty);
+            const item_val = func.air.value(ref).?;
+            const int_val = func.valueAsI32(item_val, target_ty);
             if (lowest_maybe == null or int_val < lowest_maybe.?) {
                 lowest_maybe = int_val;
             }
@@ -3192,7 +3190,7 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
         }
 
         case_list.appendAssumeCapacity(.{ .values = values, .body = case_body });
-        try self.startBlock(.block, blocktype);
+        try func.startBlock(.block, blocktype);
     }
 
     // When highest and lowest are null, we have no cases and can use a jump table
@@ -3203,12 +3201,12 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
     // When the target is an integer size larger than u32, we have no way to use the value
     // as an index, therefore we also use an if/else-chain for those cases.
     // TODO: Benchmark this to find a proper value, LLVM seems to draw the line at '40~45'.
-    const is_sparse = highest - lowest > 50 or target_ty.bitSize(self.target) > 32;
+    const is_sparse = highest - lowest > 50 or target_ty.bitSize(func.target) > 32;
 
-    const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
+    const else_body = func.air.extra[extra_index..][0..switch_br.data.else_body_len];
     const has_else_body = else_body.len != 0;
     if (has_else_body) {
-        try self.startBlock(.block, blocktype);
+        try func.startBlock(.block, blocktype);
     }
 
     if (!is_sparse) {
@@ -3216,25 +3214,25 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
         // The value 'target' represents the index into the table.
         // Each entry in the table holds the label of the
         // branch to jump to.
-        try self.startBlock(.block, blocktype);
-        try self.emitWValue(target);
+        try func.startBlock(.block, blocktype);
+        try func.emitWValue(target);
         if (lowest < 0) {
             // br_table works with indices starting from 0, so we must ensure
             // all values we put inside are at least 0.
-            try self.addImm32(lowest * -1);
-            try self.addTag(.i32_add);
+            try func.addImm32(lowest * -1);
+            try func.addTag(.i32_add);
         } else if (lowest > 0) {
             // make the index start from 0 by subtracting the lowest value
-            try self.addImm32(lowest);
-            try self.addTag(.i32_sub);
+            try func.addImm32(lowest);
+            try func.addTag(.i32_sub);
         }
 
         // '+1' because the range of values is inclusive; @boolToInt accounts for the default branch
         const depth = @intCast(u32, highest - lowest + @boolToInt(has_else_body)) + 1;
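+        // e.g. case values {-2, 3} with an else branch: depth = 3 - (-2) + 1 + 1 = 7,
+        // one entry for every value in -2...3 plus one for the default branch.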
         const jump_table: Mir.JumpTable = .{ .length = depth };
-        const table_extra_index = try self.addExtra(jump_table);
-        try self.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
-        try self.mir_extra.ensureUnusedCapacity(self.gpa, depth);
+        const table_extra_index = try func.addExtra(jump_table);
+        try func.addInst(.{ .tag = .br_table, .data = .{ .payload = table_extra_index } });
+        try func.mir_extra.ensureUnusedCapacity(func.gpa, depth);
         var value = lowest;
         while (value <= highest) : (value += 1) {
             // idx represents the branch we jump to
@@ -3250,11 +3248,11 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
                 // by using a jump table for this instead of if-else chains.
                 break :blk if (has_else_body or target_ty.zigTypeTag() == .ErrorSet) case_i else unreachable;
             };
-            self.mir_extra.appendAssumeCapacity(idx);
+            func.mir_extra.appendAssumeCapacity(idx);
         } else if (has_else_body) {
-            self.mir_extra.appendAssumeCapacity(case_i); // default branch
+            func.mir_extra.appendAssumeCapacity(case_i); // default branch
         }
-        try self.endBlock();
+        try func.endBlock();
     }
 
     const signedness: std.builtin.Signedness = blk: {
@@ -3262,79 +3260,79 @@ fn airSwitchBr(self: *Self, inst: Air.Inst.Index) InnerError!void {
         if (target_ty.zigTypeTag() != .Int) break :blk .unsigned;
 
         // in case of an actual integer, we emit the correct signedness
-        break :blk target_ty.intInfo(self.target).signedness;
+        break :blk target_ty.intInfo(func.target).signedness;
     };
 
-    try self.branches.ensureUnusedCapacity(self.gpa, case_list.items.len + @boolToInt(has_else_body));
+    try func.branches.ensureUnusedCapacity(func.gpa, case_list.items.len + @boolToInt(has_else_body));
     for (case_list.items) |case, index| {
         // when sparse, we use if/else-chain, so emit conditional checks
         if (is_sparse) {
             // for single value prong we can emit a simple if
             if (case.values.len == 1) {
-                try self.emitWValue(target);
-                const val = try self.lowerConstant(case.values[0].value, target_ty);
-                try self.emitWValue(val);
+                try func.emitWValue(target);
+                const val = try func.lowerConstant(case.values[0].value, target_ty);
+                try func.emitWValue(val);
                 const opcode = buildOpcode(.{
-                    .valtype1 = typeToValtype(target_ty, self.target),
+                    .valtype1 = typeToValtype(target_ty, func.target),
                     .op = .ne, // not equal, because we want to jump out of this block if it does not match the condition.
                     .signedness = signedness,
                 });
-                try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
-                try self.addLabel(.br_if, 0);
+                try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+                try func.addLabel(.br_if, 0);
             } else {
                 // in multi-value prongs we must check if any prongs match the target value.
-                try self.startBlock(.block, blocktype);
+                try func.startBlock(.block, blocktype);
                 for (case.values) |value| {
-                    try self.emitWValue(target);
-                    const val = try self.lowerConstant(value.value, target_ty);
-                    try self.emitWValue(val);
+                    try func.emitWValue(target);
+                    const val = try func.lowerConstant(value.value, target_ty);
+                    try func.emitWValue(val);
                     const opcode = buildOpcode(.{
-                        .valtype1 = typeToValtype(target_ty, self.target),
+                        .valtype1 = typeToValtype(target_ty, func.target),
                         .op = .eq,
                         .signedness = signedness,
                     });
-                    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
-                    try self.addLabel(.br_if, 0);
+                    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+                    try func.addLabel(.br_if, 0);
                 }
                 // value did not match any of the prong values
-                try self.addLabel(.br, 1);
-                try self.endBlock();
+                try func.addLabel(.br, 1);
+                try func.endBlock();
             }
         }
-        self.branches.appendAssumeCapacity(.{});
+        func.branches.appendAssumeCapacity(.{});
 
-        try self.currentBranch().values.ensureUnusedCapacity(self.gpa, liveness.deaths[index].len);
+        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[index].len);
         for (liveness.deaths[index]) |operand| {
-            self.processDeath(Air.indexToRef(operand));
+            func.processDeath(Air.indexToRef(operand));
         }
-        try self.genBody(case.body);
-        try self.endBlock();
-        var case_branch = self.branches.pop();
-        defer case_branch.deinit(self.gpa);
-        try self.mergeBranch(&case_branch);
+        try func.genBody(case.body);
+        try func.endBlock();
+        var case_branch = func.branches.pop();
+        defer case_branch.deinit(func.gpa);
+        try func.mergeBranch(&case_branch);
     }
 
     if (has_else_body) {
-        self.branches.appendAssumeCapacity(.{});
+        func.branches.appendAssumeCapacity(.{});
         const else_deaths = liveness.deaths.len - 1;
-        try self.currentBranch().values.ensureUnusedCapacity(self.gpa, liveness.deaths[else_deaths].len);
+        try func.currentBranch().values.ensureUnusedCapacity(func.gpa, liveness.deaths[else_deaths].len);
         for (liveness.deaths[else_deaths]) |operand| {
-            self.processDeath(Air.indexToRef(operand));
+            func.processDeath(Air.indexToRef(operand));
         }
-        try self.genBody(else_body);
-        try self.endBlock();
-        var else_branch = self.branches.pop();
-        defer else_branch.deinit(self.gpa);
-        try self.mergeBranch(&else_branch);
+        try func.genBody(else_body);
+        try func.endBlock();
+        var else_branch = func.branches.pop();
+        defer else_branch.deinit(func.gpa);
+        try func.mergeBranch(&else_branch);
     }
-    self.finishAir(inst, .none, &.{});
+    func.finishAir(inst, .none, &.{});
 }
 
-fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{un_op});
-    const operand = try self.resolveInst(un_op);
-    const err_union_ty = self.air.typeOf(un_op);
+fn airIsErr(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
+    const operand = try func.resolveInst(un_op);
+    const err_union_ty = func.air.typeOf(un_op);
     const pl_ty = err_union_ty.errorUnionPayload();
 
     const result = result: {
@@ -3346,54 +3344,54 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!v
             }
         }
 
-        try self.emitWValue(operand);
+        try func.emitWValue(operand);
         if (pl_ty.hasRuntimeBitsIgnoreComptime()) {
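+            // the error tag is a 16-bit integer inside the error union, so load it zero-extended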
-            try self.addMemArg(.i32_load16_u, .{
-                .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, self.target)),
-                .alignment = Type.anyerror.abiAlignment(self.target),
+            try func.addMemArg(.i32_load16_u, .{
+                .offset = operand.offset() + @intCast(u32, errUnionErrorOffset(pl_ty, func.target)),
+                .alignment = Type.anyerror.abiAlignment(func.target),
             });
         }
 
         // Compare the error value with '0'
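+        // (an error value of 0 means the union holds a payload, i.e. no error)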
-        try self.addImm32(0);
-        try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+        try func.addImm32(0);
+        try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
-        const is_err_tmp = try self.allocLocal(Type.i32);
-        try self.addLabel(.local_set, is_err_tmp.local.value);
+        const is_err_tmp = try func.allocLocal(Type.i32);
+        try func.addLabel(.local_set, is_err_tmp.local.value);
         break :result is_err_tmp;
     };
-    self.finishAir(inst, result, &.{un_op});
+    func.finishAir(inst, result, &.{un_op});
 }
 
-fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airUnwrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const op_ty = self.air.typeOf(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
+    const op_ty = func.air.typeOf(ty_op.operand);
     const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
     const payload_ty = err_ty.errorUnionPayload();
 
     const result = result: {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result WValue{ .none = {} };
 
-        const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target));
-        if (op_is_ptr or isByRef(payload_ty, self.target)) {
-            break :result try self.buildPointerOffset(operand, pl_offset, .new);
+        const pl_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, func.target));
+        if (op_is_ptr or isByRef(payload_ty, func.target)) {
+            break :result try func.buildPointerOffset(operand, pl_offset, .new);
         }
 
-        const payload = try self.load(operand, payload_ty, pl_offset);
-        break :result try payload.toLocal(self, payload_ty);
+        const payload = try func.load(operand, payload_ty, pl_offset);
+        break :result try payload.toLocal(func, payload_ty);
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airUnwrapErrUnionError(func: *CodeGen, inst: Air.Inst.Index, op_is_ptr: bool) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const op_ty = self.air.typeOf(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
+    const op_ty = func.air.typeOf(ty_op.operand);
     const err_ty = if (op_is_ptr) op_ty.childType() else op_ty;
     const payload_ty = err_ty.errorUnionPayload();
 
@@ -3403,94 +3401,94 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index, op_is_ptr: bool) In
         }
 
         if (op_is_ptr or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
-            break :result self.reuseOperand(ty_op.operand, operand);
+            break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        const error_val = try self.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, self.target)));
-        break :result try error_val.toLocal(self, Type.anyerror);
+        const error_val = try func.load(operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(payload_ty, func.target)));
+        break :result try error_val.toLocal(func, Type.anyerror);
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airWrapErrUnionPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const err_ty = self.air.typeOfIndex(inst);
+    const operand = try func.resolveInst(ty_op.operand);
+    const err_ty = func.air.typeOfIndex(inst);
 
-    const pl_ty = self.air.typeOf(ty_op.operand);
+    const pl_ty = func.air.typeOf(ty_op.operand);
     const result = result: {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
-            break :result self.reuseOperand(ty_op.operand, operand);
+            break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        const err_union = try self.allocStack(err_ty);
-        const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new);
-        try self.store(payload_ptr, operand, pl_ty, 0);
+        const err_union = try func.allocStack(err_ty);
+        const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
+        try func.store(payload_ptr, operand, pl_ty, 0);
 
         // ensure we also write '0' to the error part, so any present stack value gets overwritten by it.
-        try self.emitWValue(err_union);
-        try self.addImm32(0);
-        const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, self.target));
-        try self.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
+        try func.emitWValue(err_union);
+        try func.addImm32(0);
+        const err_val_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+        try func.addMemArg(.i32_store16, .{ .offset = err_union.offset() + err_val_offset, .alignment = 2 });
         break :result err_union;
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airWrapErrUnionErr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const err_ty = self.air.getRefType(ty_op.ty);
+    const operand = try func.resolveInst(ty_op.operand);
+    const err_ty = func.air.getRefType(ty_op.ty);
     const pl_ty = err_ty.errorUnionPayload();
 
     const result = result: {
         if (!pl_ty.hasRuntimeBitsIgnoreComptime()) {
-            break :result self.reuseOperand(ty_op.operand, operand);
+            break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        const err_union = try self.allocStack(err_ty);
+        const err_union = try func.allocStack(err_ty);
         // store error value
-        try self.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, self.target)));
+        try func.store(err_union, operand, Type.anyerror, @intCast(u32, errUnionErrorOffset(pl_ty, func.target)));
 
         // write 'undefined' to the payload
-        const payload_ptr = try self.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, self.target)), .new);
-        const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(self.target));
-        try self.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa });
+        const payload_ptr = try func.buildPointerOffset(err_union, @intCast(u32, errUnionPayloadOffset(pl_ty, func.target)), .new);
+        const len = @intCast(u32, err_ty.errorUnionPayload().abiSize(func.target));
+        try func.memset(payload_ptr, .{ .imm32 = len }, .{ .imm32 = 0xaaaaaaaa });
 
         break :result err_union;
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airIntcast(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airIntcast(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const ty = self.air.getRefType(ty_op.ty);
-    const operand = try self.resolveInst(ty_op.operand);
-    const operand_ty = self.air.typeOf(ty_op.operand);
+    const ty = func.air.getRefType(ty_op.ty);
+    const operand = try func.resolveInst(ty_op.operand);
+    const operand_ty = func.air.typeOf(ty_op.operand);
     if (ty.zigTypeTag() == .Vector or operand_ty.zigTypeTag() == .Vector) {
-        return self.fail("todo Wasm intcast for vectors", .{});
+        return func.fail("todo Wasm intcast for vectors", .{});
     }
-    if (ty.abiSize(self.target) > 16 or operand_ty.abiSize(self.target) > 16) {
-        return self.fail("todo Wasm intcast for bitsize > 128", .{});
+    if (ty.abiSize(func.target) > 16 or operand_ty.abiSize(func.target) > 16) {
+        return func.fail("todo Wasm intcast for bitsize > 128", .{});
     }
 
-    const result = try (try self.intcast(operand, operand_ty, ty)).toLocal(self, ty);
-    self.finishAir(inst, result, &.{});
+    const result = try (try func.intcast(operand, operand_ty, ty)).toLocal(func, ty);
+    func.finishAir(inst, result, &.{});
 }
 
 /// Upcasts or downcasts an integer based on the given and wanted types,
 /// and stores the result in a new operand.
 /// Asserts the type's bit size is <= 128.
 /// NOTE: May leave the result on the top of the stack.
-fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_info = given.intInfo(self.target);
-    const wanted_info = wanted.intInfo(self.target);
+fn intcast(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_info = given.intInfo(func.target);
+    const wanted_info = wanted.intInfo(func.target);
     assert(given_info.bits <= 128);
     assert(wanted_info.bits <= 128);
 
@@ -3499,463 +3497,463 @@ fn intcast(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!W
     if (op_bits == wanted_bits) return operand;
 
     if (op_bits > 32 and op_bits <= 64 and wanted_bits == 32) {
-        try self.emitWValue(operand);
-        try self.addTag(.i32_wrap_i64);
+        try func.emitWValue(operand);
+        try func.addTag(.i32_wrap_i64);
     } else if (op_bits == 32 and wanted_bits > 32 and wanted_bits <= 64) {
-        try self.emitWValue(operand);
-        try self.addTag(switch (wanted_info.signedness) {
+        try func.emitWValue(operand);
+        try func.addTag(switch (wanted_info.signedness) {
             .signed => .i64_extend_i32_s,
             .unsigned => .i64_extend_i32_u,
         });
     } else if (wanted_bits == 128) {
         // for 128-bit integers we store the integer on the virtual stack, rather than in a local
-        const stack_ptr = try self.allocStack(wanted);
-        try self.emitWValue(stack_ptr);
+        const stack_ptr = try func.allocStack(wanted);
+        try func.emitWValue(stack_ptr);
 
         // for 32-bit integers, we first coerce the value into a 64-bit integer before storing it,
         // meaning fewer store operations are required.
         const lhs = if (op_bits == 32) blk: {
-            break :blk try self.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
+            break :blk try func.intcast(operand, given, if (wanted.isSignedInt()) Type.i64 else Type.u64);
         } else operand;
 
         // store the least significant 64 bits first (at offset 0)
-        try self.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
+        try func.store(.{ .stack = {} }, lhs, Type.u64, 0 + stack_ptr.offset());
 
         // For signed integers we shift the value right by 63 (64-bit integer - 1 sign bit)
         // so the sign bit fills the upper 64 bits, and store that as the remaining half
         if (wanted.isSignedInt()) {
-            try self.emitWValue(stack_ptr);
-            const shr = try self.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
-            try self.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
+            try func.emitWValue(stack_ptr);
+            const shr = try func.binOp(lhs, .{ .imm64 = 63 }, Type.i64, .shr);
+            try func.store(.{ .stack = {} }, shr, Type.u64, 8 + stack_ptr.offset());
         } else {
             // Ensure the upper 64 bits are zeroed
-            try self.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
+            try func.store(stack_ptr, .{ .imm64 = 0 }, Type.u64, 8);
         }
         return stack_ptr;
-    } else return self.load(operand, wanted, 0);
+    } else return func.load(operand, wanted, 0);
 
     return WValue{ .stack = {} };
 }
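
The 128-bit branch above splits the value across two 64-bit stores: the low half at offset 0 and the sign/zero extension at offset 8 (wasm linear memory is little-endian). A minimal stand-alone sketch of that widening, assuming the input has already been coerced to 64 bits:

    // `>> 63` on a signed 64-bit value replicates the sign bit across the
    // whole word, which becomes the upper half of the 128-bit result.
    fn widenTo128(lhs: i64, signed: bool) [2]u64 {
        const lo = @bitCast(u64, lhs); // stored at offset 0
        const hi: u64 = if (signed) @bitCast(u64, lhs >> 63) else 0; // stored at offset 8
        return .{ lo, hi };
    }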
 
-fn airIsNull(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{un_op});
-    const operand = try self.resolveInst(un_op);
+fn airIsNull(func: *CodeGen, inst: Air.Inst.Index, opcode: wasm.Opcode, op_kind: enum { value, ptr }) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
+    const operand = try func.resolveInst(un_op);
 
-    const op_ty = self.air.typeOf(un_op);
+    const op_ty = func.air.typeOf(un_op);
     const optional_ty = if (op_kind == .ptr) op_ty.childType() else op_ty;
-    const is_null = try self.isNull(operand, optional_ty, opcode);
-    const result = try is_null.toLocal(self, optional_ty);
-    self.finishAir(inst, result, &.{un_op});
+    const is_null = try func.isNull(operand, optional_ty, opcode);
+    const result = try is_null.toLocal(func, optional_ty);
+    func.finishAir(inst, result, &.{un_op});
 }
 
 /// For a given type and operand, checks if it's considered `null`.
 /// NOTE: Leaves the result on the stack
-fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
-    try self.emitWValue(operand);
+fn isNull(func: *CodeGen, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) InnerError!WValue {
+    try func.emitWValue(operand);
     if (!optional_ty.optionalReprIsPayload()) {
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = optional_ty.optionalChild(&buf);
         // When the payload is zero-bits, we can treat the operand as a value,
         // rather than as a pointer to the stack value
         if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
-            try self.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
+            try func.addMemArg(.i32_load8_u, .{ .offset = operand.offset(), .alignment = 1 });
         }
     }
 
     // Compare the null value with '0'
-    try self.addImm32(0);
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try func.addImm32(0);
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     return WValue{ .stack = {} };
 }
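
In memory terms the non-payload-repr check is a single byte compare. A hedged sketch for the by-ref case, assuming the non-null flag occupies the first byte of the allocation (which the `i32_load8_u` at offset 0 above implies):

    // 0 means null, non-zero means a payload is present; `opcode` selects
    // between equality (is_null) and inequality (is_non_null).
    fn isNullByRef(opt: [*]const u8) bool {
        return opt[0] == 0;
    }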
 
-fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const opt_ty = self.air.typeOf(ty_op.operand);
-    const payload_ty = self.air.typeOfIndex(inst);
-    if (self.liveness.isUnused(inst) or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
-        return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airOptionalPayload(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    const opt_ty = func.air.typeOf(ty_op.operand);
+    const payload_ty = func.air.typeOfIndex(inst);
+    if (func.liveness.isUnused(inst) or !payload_ty.hasRuntimeBitsIgnoreComptime()) {
+        return func.finishAir(inst, .none, &.{ty_op.operand});
     }
 
     const result = result: {
-        const operand = try self.resolveInst(ty_op.operand);
-        if (opt_ty.optionalReprIsPayload()) break :result self.reuseOperand(ty_op.operand, operand);
+        const operand = try func.resolveInst(ty_op.operand);
+        if (opt_ty.optionalReprIsPayload()) break :result func.reuseOperand(ty_op.operand, operand);
 
-        const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target);
+        const offset = opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target);
 
-        if (isByRef(payload_ty, self.target)) {
-            break :result try self.buildPointerOffset(operand, offset, .new);
+        if (isByRef(payload_ty, func.target)) {
+            break :result try func.buildPointerOffset(operand, offset, .new);
         }
 
-        const payload = try self.load(operand, payload_ty, @intCast(u32, offset));
-        break :result try payload.toLocal(self, payload_ty);
+        const payload = try func.load(operand, payload_ty, @intCast(u32, offset));
+        break :result try payload.toLocal(func, payload_ty);
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
-    const operand = try self.resolveInst(ty_op.operand);
-    const opt_ty = self.air.typeOf(ty_op.operand).childType();
+fn airOptionalPayloadPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    const operand = try func.resolveInst(ty_op.operand);
+    const opt_ty = func.air.typeOf(ty_op.operand).childType();
 
     const result = result: {
         var buf: Type.Payload.ElemType = undefined;
         const payload_ty = opt_ty.optionalChild(&buf);
         if (!payload_ty.hasRuntimeBitsIgnoreComptime() or opt_ty.optionalReprIsPayload()) {
-            break :result self.reuseOperand(ty_op.operand, operand);
+            break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target);
-        break :result try self.buildPointerOffset(operand, offset, .new);
+        const offset = opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target);
+        break :result try func.buildPointerOffset(operand, offset, .new);
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const operand = try self.resolveInst(ty_op.operand);
-    const opt_ty = self.air.typeOf(ty_op.operand).childType();
+fn airOptionalPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    const operand = try func.resolveInst(ty_op.operand);
+    const opt_ty = func.air.typeOf(ty_op.operand).childType();
     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = opt_ty.optionalChild(&buf);
     if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
-        return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
+        return func.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty.fmtDebug()});
     }
 
     if (opt_ty.optionalReprIsPayload()) {
-        return self.finishAir(inst, operand, &.{ty_op.operand});
+        return func.finishAir(inst, operand, &.{ty_op.operand});
     }
 
-    const offset = std.math.cast(u32, opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) orelse {
-        const module = self.bin_file.base.options.module.?;
-        return self.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
+    const offset = std.math.cast(u32, opt_ty.abiSize(func.target) - payload_ty.abiSize(func.target)) orelse {
+        const module = func.bin_file.base.options.module.?;
+        return func.fail("Optional type {} too big to fit into stack frame", .{opt_ty.fmt(module)});
     };
 
-    try self.emitWValue(operand);
-    try self.addImm32(1);
-    try self.addMemArg(.i32_store8, .{ .offset = operand.offset(), .alignment = 1 });
+    try func.emitWValue(operand);
+    try func.addImm32(1);
+    try func.addMemArg(.i32_store8, .{ .offset = operand.offset(), .alignment = 1 });
 
-    const result = try self.buildPointerOffset(operand, offset, .new);
-    return self.finishAir(inst, result, &.{ty_op.operand});
+    const result = try func.buildPointerOffset(operand, offset, .new);
+    return func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
-    const payload_ty = self.air.typeOf(ty_op.operand);
+fn airWrapOptional(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    const payload_ty = func.air.typeOf(ty_op.operand);
 
     const result = result: {
         if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
-            const non_null_bit = try self.allocStack(Type.initTag(.u1));
-            try self.emitWValue(non_null_bit);
-            try self.addImm32(1);
-            try self.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
+            const non_null_bit = try func.allocStack(Type.initTag(.u1));
+            try func.emitWValue(non_null_bit);
+            try func.addImm32(1);
+            try func.addMemArg(.i32_store8, .{ .offset = non_null_bit.offset(), .alignment = 1 });
             break :result non_null_bit;
         }
 
-        const operand = try self.resolveInst(ty_op.operand);
-        const op_ty = self.air.typeOfIndex(inst);
+        const operand = try func.resolveInst(ty_op.operand);
+        const op_ty = func.air.typeOfIndex(inst);
         if (op_ty.optionalReprIsPayload()) {
-            break :result self.reuseOperand(ty_op.operand, operand);
+            break :result func.reuseOperand(ty_op.operand, operand);
         }
-        const offset = std.math.cast(u32, op_ty.abiSize(self.target) - payload_ty.abiSize(self.target)) orelse {
-            const module = self.bin_file.base.options.module.?;
-            return self.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
+        const offset = std.math.cast(u32, op_ty.abiSize(func.target) - payload_ty.abiSize(func.target)) orelse {
+            const module = func.bin_file.base.options.module.?;
+            return func.fail("Optional type {} too big to fit into stack frame", .{op_ty.fmt(module)});
         };
 
         // Create the optional on the stack, set the non-null bit, and store the operand as its payload
-        const result_ptr = try self.allocStack(op_ty);
-        try self.emitWValue(result_ptr);
-        try self.addImm32(1);
-        try self.addMemArg(.i32_store8, .{ .offset = result_ptr.offset(), .alignment = 1 });
+        const result_ptr = try func.allocStack(op_ty);
+        try func.emitWValue(result_ptr);
+        try func.addImm32(1);
+        try func.addMemArg(.i32_store8, .{ .offset = result_ptr.offset(), .alignment = 1 });
 
-        const payload_ptr = try self.buildPointerOffset(result_ptr, offset, .new);
-        try self.store(payload_ptr, operand, payload_ty, 0);
+        const payload_ptr = try func.buildPointerOffset(result_ptr, offset, .new);
+        try func.store(payload_ptr, operand, payload_ty, 0);
         break :result result_ptr;
     };
 
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
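
Putting the pieces together: the optional is a non-null byte at offset 0 followed (after padding) by the payload at `abiSize(optional) - abiSize(payload)`. An illustrative sketch for a 4-byte payload inside an 8-byte optional, not the backend's actual helper:

    const std = @import("std");

    fn wrapOptional(buf: *[8]u8, payload: u32) void {
        buf[0] = 1; // set the non-null bit
        std.mem.writeIntLittle(u32, buf[4..8], payload); // payload at offset 8 - 4
    }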
 
-fn airSlice(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
-    const slice_ty = self.air.typeOfIndex(inst);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
+    const slice_ty = func.air.typeOfIndex(inst);
 
-    const slice = try self.allocStack(slice_ty);
-    try self.store(slice, lhs, Type.usize, 0);
-    try self.store(slice, rhs, Type.usize, self.ptrSize());
+    const slice = try func.allocStack(slice_ty);
+    try func.store(slice, lhs, Type.usize, 0);
+    try func.store(slice, rhs, Type.usize, func.ptrSize());
 
-    self.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs });
+    func.finishAir(inst, slice, &.{ bin_op.lhs, bin_op.rhs });
 }
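
The resulting stack value is the conventional `{ ptr, len }` pair, with `len` written `ptrSize()` bytes after the pointer. In struct form (wasm32 assumed, so `usize` is 32 bits; illustrative only, the backend stores the two fields directly):

    const Slice32 = extern struct {
        ptr: u32, // stored at offset 0
        len: u32, // stored at offset ptrSize() == 4
    };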
 
-fn airSliceLen(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airSliceLen(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const len = try self.load(operand, Type.usize, self.ptrSize());
-    const result = try len.toLocal(self, Type.usize);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const operand = try func.resolveInst(ty_op.operand);
+    const len = try func.load(operand, Type.usize, func.ptrSize());
+    const result = try len.toLocal(func, Type.usize);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airSliceElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const slice_ty = self.air.typeOf(bin_op.lhs);
-    const slice = try self.resolveInst(bin_op.lhs);
-    const index = try self.resolveInst(bin_op.rhs);
+    const slice_ty = func.air.typeOf(bin_op.lhs);
+    const slice = try func.resolveInst(bin_op.lhs);
+    const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = slice_ty.childType();
-    const elem_size = elem_ty.abiSize(self.target);
+    const elem_size = elem_ty.abiSize(func.target);
 
     // load pointer onto stack
-    _ = try self.load(slice, Type.usize, 0);
+    _ = try func.load(slice, Type.usize, 0);
 
     // calculate index into slice
-    try self.emitWValue(index);
-    try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
-    try self.addTag(.i32_mul);
-    try self.addTag(.i32_add);
+    try func.emitWValue(index);
+    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addTag(.i32_mul);
+    try func.addTag(.i32_add);
 
-    const result_ptr = try self.allocLocal(elem_ty);
-    try self.addLabel(.local_set, result_ptr.local.value);
+    const result_ptr = try func.allocLocal(elem_ty);
+    try func.addLabel(.local_set, result_ptr.local.value);
 
-    const result = if (!isByRef(elem_ty, self.target)) result: {
-        const elem_val = try self.load(result_ptr, elem_ty, 0);
-        break :result try elem_val.toLocal(self, elem_ty);
+    const result = if (!isByRef(elem_ty, func.target)) result: {
+        const elem_val = try func.load(result_ptr, elem_ty, 0);
+        break :result try elem_val.toLocal(func, elem_ty);
     } else result_ptr;
 
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
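
The emitted `i32_mul`/`i32_add` pair computes a plain byte address; the same arithmetic in Zig, for reference (the identical sequence appears in airSliceElemPtr, airPtrElemVal, airPtrElemPtr, and airArrayElemVal below):

    // What the instruction sequence leaves on the wasm stack before local_set.
    fn elemAddr(base: u32, index: u32, elem_size: u32) u32 {
        return base + index * elem_size;
    }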
 
-fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airSliceElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const elem_ty = self.air.getRefType(ty_pl.ty).childType();
-    const elem_size = elem_ty.abiSize(self.target);
+    const elem_ty = func.air.getRefType(ty_pl.ty).childType();
+    const elem_size = elem_ty.abiSize(func.target);
 
-    const slice = try self.resolveInst(bin_op.lhs);
-    const index = try self.resolveInst(bin_op.rhs);
+    const slice = try func.resolveInst(bin_op.lhs);
+    const index = try func.resolveInst(bin_op.rhs);
 
-    _ = try self.load(slice, Type.usize, 0);
+    _ = try func.load(slice, Type.usize, 0);
 
     // calculate index into slice
-    try self.emitWValue(index);
-    try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
-    try self.addTag(.i32_mul);
-    try self.addTag(.i32_add);
+    try func.emitWValue(index);
+    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addTag(.i32_mul);
+    try func.addTag(.i32_add);
 
-    const result = try self.allocLocal(Type.i32);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result = try func.allocLocal(Type.i32);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airSlicePtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
-    const operand = try self.resolveInst(ty_op.operand);
-    const ptr = try self.load(operand, Type.usize, 0);
-    const result = try ptr.toLocal(self, Type.usize);
-    self.finishAir(inst, result, &.{ty_op.operand});
+fn airSlicePtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    const operand = try func.resolveInst(ty_op.operand);
+    const ptr = try func.load(operand, Type.usize, 0);
+    const result = try ptr.toLocal(func, Type.usize);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airTrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airTrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const wanted_ty = self.air.getRefType(ty_op.ty);
-    const op_ty = self.air.typeOf(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
+    const wanted_ty = func.air.getRefType(ty_op.ty);
+    const op_ty = func.air.typeOf(ty_op.operand);
 
-    const int_info = op_ty.intInfo(self.target);
+    const int_info = op_ty.intInfo(func.target);
     if (toWasmBits(int_info.bits) == null) {
-        return self.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{int_info.bits});
+        return func.fail("TODO: Implement wasm integer truncation for integer bitsize: {d}", .{int_info.bits});
     }
 
-    var result = try self.intcast(operand, op_ty, wanted_ty);
-    const wanted_bits = wanted_ty.intInfo(self.target).bits;
+    var result = try func.intcast(operand, op_ty, wanted_ty);
+    const wanted_bits = wanted_ty.intInfo(func.target).bits;
     const wasm_bits = toWasmBits(wanted_bits).?;
     if (wasm_bits != wanted_bits) {
-        result = try self.wrapOperand(result, wanted_ty);
+        result = try func.wrapOperand(result, wanted_ty);
     }
 
-    self.finishAir(inst, try result.toLocal(self, wanted_ty), &.{ty_op.operand});
+    func.finishAir(inst, try result.toLocal(func, wanted_ty), &.{ty_op.operand});
 }
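
`wrapOperand` (defined elsewhere in this file) masks the value down whenever the destination's bit count is narrower than its wasm storage type. Roughly, for the 32-bit case:

    // Sketch: truncating to e.g. a u5 held in an i32 local means clearing
    // every bit above the destination width.
    fn wrap32(value: u32, wanted_bits: u5) u32 {
        const mask = (@as(u32, 1) << wanted_bits) - 1;
        return value & mask;
    }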
 
-fn airBoolToInt(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const result = if (self.liveness.isUnused(inst))
+fn airBoolToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    const result = if (func.liveness.isUnused(inst))
         WValue{ .none = {} }
     else result: {
-        const operand = try self.resolveInst(un_op);
-        break :result self.reuseOperand(un_op, operand);
+        const operand = try func.resolveInst(un_op);
+        break :result func.reuseOperand(un_op, operand);
     };
 
-    self.finishAir(inst, result, &.{un_op});
+    func.finishAir(inst, result, &.{un_op});
 }
 
-fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airArrayToSlice(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const array_ty = self.air.typeOf(ty_op.operand).childType();
-    const slice_ty = self.air.getRefType(ty_op.ty);
+    const operand = try func.resolveInst(ty_op.operand);
+    const array_ty = func.air.typeOf(ty_op.operand).childType();
+    const slice_ty = func.air.getRefType(ty_op.ty);
 
     // create a slice on the stack
-    const slice_local = try self.allocStack(slice_ty);
+    const slice_local = try func.allocStack(slice_ty);
 
     // store the array ptr in the slice
     if (array_ty.hasRuntimeBitsIgnoreComptime()) {
-        try self.store(slice_local, operand, Type.usize, 0);
+        try func.store(slice_local, operand, Type.usize, 0);
     }
 
     // store the length of the array in the slice
     const len = WValue{ .imm32 = @intCast(u32, array_ty.arrayLen()) };
-    try self.store(slice_local, len, Type.usize, self.ptrSize());
+    try func.store(slice_local, len, Type.usize, func.ptrSize());
 
-    self.finishAir(inst, slice_local, &.{ty_op.operand});
+    func.finishAir(inst, slice_local, &.{ty_op.operand});
 }
 
-fn airPtrToInt(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{un_op});
-    const operand = try self.resolveInst(un_op);
+fn airPtrToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
+    const operand = try func.resolveInst(un_op);
 
     const result = switch (operand) {
         // for stack offset, return a pointer to this offset.
-        .stack_offset => try self.buildPointerOffset(operand, 0, .new),
-        else => self.reuseOperand(un_op, operand),
+        .stack_offset => try func.buildPointerOffset(operand, 0, .new),
+        else => func.reuseOperand(un_op, operand),
     };
-    self.finishAir(inst, result, &.{un_op});
+    func.finishAir(inst, result, &.{un_op});
 }
 
-fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airPtrElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const ptr = try self.resolveInst(bin_op.lhs);
-    const index = try self.resolveInst(bin_op.rhs);
+    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const ptr = try func.resolveInst(bin_op.lhs);
+    const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = ptr_ty.childType();
-    const elem_size = elem_ty.abiSize(self.target);
+    const elem_size = elem_ty.abiSize(func.target);
 
     // load pointer onto the stack
     if (ptr_ty.isSlice()) {
-        _ = try self.load(ptr, Type.usize, 0);
+        _ = try func.load(ptr, Type.usize, 0);
     } else {
-        try self.lowerToStack(ptr);
+        try func.lowerToStack(ptr);
     }
 
     // calculate index into ptr
-    try self.emitWValue(index);
-    try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
-    try self.addTag(.i32_mul);
-    try self.addTag(.i32_add);
+    try func.emitWValue(index);
+    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addTag(.i32_mul);
+    try func.addTag(.i32_add);
 
     const elem_result = val: {
-        var result = try self.allocLocal(elem_ty);
-        try self.addLabel(.local_set, result.local.value);
-        if (isByRef(elem_ty, self.target)) {
+        var result = try func.allocLocal(elem_ty);
+        try func.addLabel(.local_set, result.local.value);
+        if (isByRef(elem_ty, func.target)) {
             break :val result;
         }
-        defer result.free(self); // only free if it's not returned like above
+        defer result.free(func); // only free if it's not returned like above
 
-        const elem_val = try self.load(result, elem_ty, 0);
-        break :val try elem_val.toLocal(self, elem_ty);
+        const elem_val = try func.load(result, elem_ty, 0);
+        break :val try elem_val.toLocal(func, elem_ty);
     };
-    self.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+    func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airPtrElemPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
-    const elem_ty = self.air.getRefType(ty_pl.ty).childType();
-    const elem_size = elem_ty.abiSize(self.target);
+    const ptr_ty = func.air.typeOf(bin_op.lhs);
+    const elem_ty = func.air.getRefType(ty_pl.ty).childType();
+    const elem_size = elem_ty.abiSize(func.target);
 
-    const ptr = try self.resolveInst(bin_op.lhs);
-    const index = try self.resolveInst(bin_op.rhs);
+    const ptr = try func.resolveInst(bin_op.lhs);
+    const index = try func.resolveInst(bin_op.rhs);
 
     // load pointer onto the stack
     if (ptr_ty.isSlice()) {
-        _ = try self.load(ptr, Type.usize, 0);
+        _ = try func.load(ptr, Type.usize, 0);
     } else {
-        try self.lowerToStack(ptr);
+        try func.lowerToStack(ptr);
     }
 
     // calculate index into ptr
-    try self.emitWValue(index);
-    try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
-    try self.addTag(.i32_mul);
-    try self.addTag(.i32_add);
+    try func.emitWValue(index);
+    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addTag(.i32_mul);
+    try func.addTag(.i32_add);
 
-    const result = try self.allocLocal(Type.i32);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result = try func.allocLocal(Type.i32);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airPtrBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airPtrBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const bin_op = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ptr = try self.resolveInst(bin_op.lhs);
-    const offset = try self.resolveInst(bin_op.rhs);
-    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr = try func.resolveInst(bin_op.lhs);
+    const offset = try func.resolveInst(bin_op.rhs);
+    const ptr_ty = func.air.typeOf(bin_op.lhs);
     const pointee_ty = switch (ptr_ty.ptrSize()) {
         .One => ptr_ty.childType().childType(), // ptr to array, so get array element type
         else => ptr_ty.childType(),
     };
 
-    const valtype = typeToValtype(Type.usize, self.target);
+    const valtype = typeToValtype(Type.usize, func.target);
     const mul_opcode = buildOpcode(.{ .valtype1 = valtype, .op = .mul });
     const bin_opcode = buildOpcode(.{ .valtype1 = valtype, .op = op });
 
-    try self.lowerToStack(ptr);
-    try self.emitWValue(offset);
-    try self.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(self.target))));
-    try self.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
-    try self.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
+    try func.lowerToStack(ptr);
+    try func.emitWValue(offset);
+    try func.addImm32(@bitCast(i32, @intCast(u32, pointee_ty.abiSize(func.target))));
+    try func.addTag(Mir.Inst.Tag.fromOpcode(mul_opcode));
+    try func.addTag(Mir.Inst.Tag.fromOpcode(bin_opcode));
 
-    const result = try self.allocLocal(Type.usize);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result = try func.allocLocal(Type.usize);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airMemset(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const bin_op = self.air.extraData(Air.Bin, pl_op.payload).data;
+fn airMemset(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
 
-    const ptr = try self.resolveInst(pl_op.operand);
-    const value = try self.resolveInst(bin_op.lhs);
-    const len = try self.resolveInst(bin_op.rhs);
-    try self.memset(ptr, len, value);
+    const ptr = try func.resolveInst(pl_op.operand);
+    const value = try func.resolveInst(bin_op.lhs);
+    const len = try func.resolveInst(bin_op.rhs);
+    try func.memset(ptr, len, value);
 
-    self.finishAir(inst, .none, &.{pl_op.operand});
+    func.finishAir(inst, .none, &.{pl_op.operand});
 }
 
 /// Sets a region of memory at `ptr` to the value of `value`.
 /// When the user has enabled the bulk_memory feature, we lower
 /// this to wasm's `memory.fill` instruction. When the feature is not present,
 /// we implement the loop manually.
-fn memset(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void {
+fn memset(func: *CodeGen, ptr: WValue, len: WValue, value: WValue) InnerError!void {
     // When bulk_memory is enabled, we lower it to wasm's memory.fill instruction.
     // If not, we lower it ourselves
-    if (std.Target.wasm.featureSetHas(self.target.cpu.features, .bulk_memory)) {
-        try self.lowerToStack(ptr);
-        try self.emitWValue(value);
-        try self.emitWValue(len);
-        try self.addExtended(.memory_fill);
+    if (std.Target.wasm.featureSetHas(func.target.cpu.features, .bulk_memory)) {
+        try func.lowerToStack(ptr);
+        try func.emitWValue(value);
+        try func.emitWValue(len);
+        try func.addExtended(.memory_fill);
         return;
     }
 
@@ -3972,14 +3970,14 @@ fn memset(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
             var offset: u32 = 0;
             const base = ptr.offset();
             while (offset < length) : (offset += 1) {
-                try self.emitWValue(ptr);
-                try self.emitWValue(value);
-                switch (self.arch()) {
+                try func.emitWValue(ptr);
+                try func.emitWValue(value);
+                switch (func.arch()) {
                     .wasm32 => {
-                        try self.addMemArg(.i32_store8, .{ .offset = base + offset, .alignment = 1 });
+                        try func.addMemArg(.i32_store8, .{ .offset = base + offset, .alignment = 1 });
                     },
                     .wasm64 => {
-                        try self.addMemArg(.i64_store8, .{ .offset = base + offset, .alignment = 1 });
+                        try func.addMemArg(.i64_store8, .{ .offset = base + offset, .alignment = 1 });
                     },
                     else => unreachable,
                 }
@@ -3988,378 +3986,378 @@ fn memset(self: *Self, ptr: WValue, len: WValue, value: WValue) InnerError!void
         else => {
             // TODO: We should probably lower this to a call to compiler_rt,
             // but for now we implement it manually
-            const offset = try self.ensureAllocLocal(Type.usize); // local for counter
+            const offset = try func.ensureAllocLocal(Type.usize); // local for counter
             // outer block to jump to when loop is done
-            try self.startBlock(.block, wasm.block_empty);
-            try self.startBlock(.loop, wasm.block_empty);
-            try self.emitWValue(offset);
-            try self.emitWValue(len);
-            switch (self.arch()) {
-                .wasm32 => try self.addTag(.i32_eq),
-                .wasm64 => try self.addTag(.i64_eq),
+            try func.startBlock(.block, wasm.block_empty);
+            try func.startBlock(.loop, wasm.block_empty);
+            try func.emitWValue(offset);
+            try func.emitWValue(len);
+            switch (func.arch()) {
+                .wasm32 => try func.addTag(.i32_eq),
+                .wasm64 => try func.addTag(.i64_eq),
                 else => unreachable,
             }
-            try self.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
-            try self.emitWValue(ptr);
-            try self.emitWValue(offset);
-            switch (self.arch()) {
-                .wasm32 => try self.addTag(.i32_add),
-                .wasm64 => try self.addTag(.i64_add),
+            try func.addLabel(.br_if, 1); // jump out of loop into outer block (finished)
+            try func.emitWValue(ptr);
+            try func.emitWValue(offset);
+            switch (func.arch()) {
+                .wasm32 => try func.addTag(.i32_add),
+                .wasm64 => try func.addTag(.i64_add),
                 else => unreachable,
             }
-            try self.emitWValue(value);
-            const mem_store_op: Mir.Inst.Tag = switch (self.arch()) {
+            try func.emitWValue(value);
+            const mem_store_op: Mir.Inst.Tag = switch (func.arch()) {
                 .wasm32 => .i32_store8,
                 .wasm64 => .i64_store8,
                 else => unreachable,
             };
-            try self.addMemArg(mem_store_op, .{ .offset = ptr.offset(), .alignment = 1 });
-            try self.emitWValue(offset);
-            try self.addImm32(1);
-            switch (self.arch()) {
-                .wasm32 => try self.addTag(.i32_add),
-                .wasm64 => try self.addTag(.i64_add),
+            try func.addMemArg(mem_store_op, .{ .offset = ptr.offset(), .alignment = 1 });
+            try func.emitWValue(offset);
+            try func.addImm32(1);
+            switch (func.arch()) {
+                .wasm32 => try func.addTag(.i32_add),
+                .wasm64 => try func.addTag(.i64_add),
                 else => unreachable,
             }
-            try self.addLabel(.local_set, offset.local.value);
-            try self.addLabel(.br, 0); // jump to start of loop
-            try self.endBlock();
-            try self.endBlock();
+            try func.addLabel(.local_set, offset.local.value);
+            try func.addLabel(.br, 0); // jump to start of loop
+            try func.endBlock();
+            try func.endBlock();
         },
     }
 }
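
For targets without bulk_memory, the loop above is the moral equivalent of this byte loop; the backend emits the comparison, store, and increment as raw wasm instructions rather than calling anything:

    fn memsetManual(ptr: [*]u8, len: usize, value: u8) void {
        var offset: usize = 0;
        while (offset != len) : (offset += 1) {
            ptr[offset] = value; // one i32/i64_store8 per iteration
        }
    }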
 
-fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airArrayElemVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const array_ty = self.air.typeOf(bin_op.lhs);
-    const array = try self.resolveInst(bin_op.lhs);
-    const index = try self.resolveInst(bin_op.rhs);
+    const array_ty = func.air.typeOf(bin_op.lhs);
+    const array = try func.resolveInst(bin_op.lhs);
+    const index = try func.resolveInst(bin_op.rhs);
     const elem_ty = array_ty.childType();
-    const elem_size = elem_ty.abiSize(self.target);
+    const elem_size = elem_ty.abiSize(func.target);
 
-    try self.lowerToStack(array);
-    try self.emitWValue(index);
-    try self.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
-    try self.addTag(.i32_mul);
-    try self.addTag(.i32_add);
+    try func.lowerToStack(array);
+    try func.emitWValue(index);
+    try func.addImm32(@bitCast(i32, @intCast(u32, elem_size)));
+    try func.addTag(.i32_mul);
+    try func.addTag(.i32_add);
 
     const elem_result = val: {
-        var result = try self.allocLocal(Type.usize);
-        try self.addLabel(.local_set, result.local.value);
+        var result = try func.allocLocal(Type.usize);
+        try func.addLabel(.local_set, result.local.value);
 
-        if (isByRef(elem_ty, self.target)) {
+        if (isByRef(elem_ty, func.target)) {
             break :val result;
         }
-        defer result.free(self); // only free if no longer needed and not returned like above
+        defer result.free(func); // only free if no longer needed and not returned like above
 
-        const elem_val = try self.load(result, elem_ty, 0);
-        break :val try elem_val.toLocal(self, elem_ty);
+        const elem_val = try func.load(result, elem_ty, 0);
+        break :val try elem_val.toLocal(func, elem_ty);
     };
 
-    self.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
+    func.finishAir(inst, elem_result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airFloatToInt(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airFloatToInt(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
-    const op_ty = self.air.typeOf(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
+    const dest_ty = func.air.typeOfIndex(inst);
+    const op_ty = func.air.typeOf(ty_op.operand);
 
-    if (op_ty.abiSize(self.target) > 8) {
-        return self.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
+    if (op_ty.abiSize(func.target) > 8) {
+        return func.fail("TODO: floatToInt for integers/floats with bitsize larger than 64 bits", .{});
     }
 
-    try self.emitWValue(operand);
+    try func.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .trunc,
-        .valtype1 = typeToValtype(dest_ty, self.target),
-        .valtype2 = typeToValtype(op_ty, self.target),
+        .valtype1 = typeToValtype(dest_ty, func.target),
+        .valtype2 = typeToValtype(op_ty, func.target),
         .signedness = if (dest_ty.isSignedInt()) .signed else .unsigned,
     });
-    try self.addTag(Mir.Inst.Tag.fromOpcode(op));
-    const wrapped = try self.wrapOperand(.{ .stack = {} }, dest_ty);
-    const result = try wrapped.toLocal(self, dest_ty);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    try func.addTag(Mir.Inst.Tag.fromOpcode(op));
+    const wrapped = try func.wrapOperand(.{ .stack = {} }, dest_ty);
+    const result = try wrapped.toLocal(func, dest_ty);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
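
The `trunc` opcode truncates toward zero, after which the result is wrapped to the destination width. The same two steps with this era's Zig builtins (a sketch; wasm's trunc traps on out-of-range inputs, whereas `@floatToInt` has safety-checked undefined behavior):

    fn floatToI8(x: f32) i8 {
        const wide = @floatToInt(i32, x); // i32.trunc_f32_s
        return @truncate(i8, wide); // wrapOperand for the narrow destination
    }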
 
-fn airIntToFloat(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airIntToFloat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const dest_ty = self.air.typeOfIndex(inst);
-    const op_ty = self.air.typeOf(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
+    const dest_ty = func.air.typeOfIndex(inst);
+    const op_ty = func.air.typeOf(ty_op.operand);
 
-    if (op_ty.abiSize(self.target) > 8) {
-        return self.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
+    if (op_ty.abiSize(func.target) > 8) {
+        return func.fail("TODO: intToFloat for integers/floats with bitsize larger than 64 bits", .{});
     }
 
-    try self.emitWValue(operand);
+    try func.emitWValue(operand);
     const op = buildOpcode(.{
         .op = .convert,
-        .valtype1 = typeToValtype(dest_ty, self.target),
-        .valtype2 = typeToValtype(op_ty, self.target),
+        .valtype1 = typeToValtype(dest_ty, func.target),
+        .valtype2 = typeToValtype(op_ty, func.target),
         .signedness = if (op_ty.isSignedInt()) .signed else .unsigned,
     });
-    try self.addTag(Mir.Inst.Tag.fromOpcode(op));
+    try func.addTag(Mir.Inst.Tag.fromOpcode(op));
 
-    const result = try self.allocLocal(dest_ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const result = try func.allocLocal(dest_ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airSplat(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const operand = try self.resolveInst(ty_op.operand);
+fn airSplat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    const operand = try func.resolveInst(ty_op.operand);
 
     _ = operand;
-    return self.fail("TODO: Implement wasm airSplat", .{});
+    return func.fail("TODO: Implement wasm airSplat", .{});
 }
 
-fn airSelect(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const operand = try self.resolveInst(pl_op.operand);
+fn airSelect(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const operand = try func.resolveInst(pl_op.operand);
 
     _ = operand;
-    return self.fail("TODO: Implement wasm airSelect", .{});
+    return func.fail("TODO: Implement wasm airSelect", .{});
 }
 
-fn airShuffle(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    const operand = try self.resolveInst(ty_op.operand);
+fn airShuffle(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    const operand = try func.resolveInst(ty_op.operand);
 
     _ = operand;
-    return self.fail("TODO: Implement wasm airShuffle", .{});
+    return func.fail("TODO: Implement wasm airShuffle", .{});
 }
 
-fn airReduce(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const reduce = self.air.instructions.items(.data)[inst].reduce;
-    const operand = try self.resolveInst(reduce.operand);
+fn airReduce(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const reduce = func.air.instructions.items(.data)[inst].reduce;
+    const operand = try func.resolveInst(reduce.operand);
 
     _ = operand;
-    return self.fail("TODO: Implement wasm airReduce", .{});
+    return func.fail("TODO: Implement wasm airReduce", .{});
 }
 
-fn airAggregateInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const result_ty = self.air.typeOfIndex(inst);
+fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const result_ty = func.air.typeOfIndex(inst);
     const len = @intCast(usize, result_ty.arrayLen());
-    const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
+    const elements = @ptrCast([]const Air.Inst.Ref, func.air.extra[ty_pl.payload..][0..len]);
 
     const result: WValue = result_value: {
-        if (self.liveness.isUnused(inst)) break :result_value WValue.none;
+        if (func.liveness.isUnused(inst)) break :result_value WValue.none;
         switch (result_ty.zigTypeTag()) {
             .Array => {
-                const result = try self.allocStack(result_ty);
+                const result = try func.allocStack(result_ty);
                 const elem_ty = result_ty.childType();
-                const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
+                const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
 
                 // When the element type is by reference, we must copy the entire
                 // value. It is therefore safer to move the offset pointer and store
                 // each value individually, instead of using store offsets.
-                if (isByRef(elem_ty, self.target)) {
+                if (isByRef(elem_ty, func.target)) {
                     // copy the stack pointer into a temporary local, which is
                     // advanced after each element so every value is stored in the right position.
-                    const offset = try self.buildPointerOffset(result, 0, .new);
+                    const offset = try func.buildPointerOffset(result, 0, .new);
                     for (elements) |elem, elem_index| {
-                        const elem_val = try self.resolveInst(elem);
-                        try self.store(offset, elem_val, elem_ty, 0);
+                        const elem_val = try func.resolveInst(elem);
+                        try func.store(offset, elem_val, elem_ty, 0);
 
                         if (elem_index < elements.len - 1) {
-                            _ = try self.buildPointerOffset(offset, elem_size, .modify);
+                            _ = try func.buildPointerOffset(offset, elem_size, .modify);
                         }
                     }
                 } else {
                     var offset: u32 = 0;
                     for (elements) |elem| {
-                        const elem_val = try self.resolveInst(elem);
-                        try self.store(result, elem_val, elem_ty, offset);
+                        const elem_val = try func.resolveInst(elem);
+                        try func.store(result, elem_val, elem_ty, offset);
                         offset += elem_size;
                     }
                 }
                 break :result_value result;
             },
             .Struct => {
-                const result = try self.allocStack(result_ty);
-                const offset = try self.buildPointerOffset(result, 0, .new); // pointer to offset
+                const result = try func.allocStack(result_ty);
+                const offset = try func.buildPointerOffset(result, 0, .new); // pointer to offset
                 for (elements) |elem, elem_index| {
                     if (result_ty.structFieldValueComptime(elem_index) != null) continue;
 
                     const elem_ty = result_ty.structFieldType(elem_index);
-                    const elem_size = @intCast(u32, elem_ty.abiSize(self.target));
-                    const value = try self.resolveInst(elem);
-                    try self.store(offset, value, elem_ty, 0);
+                    const elem_size = @intCast(u32, elem_ty.abiSize(func.target));
+                    const value = try func.resolveInst(elem);
+                    try func.store(offset, value, elem_ty, 0);
 
                     if (elem_index < elements.len - 1) {
-                        _ = try self.buildPointerOffset(offset, elem_size, .modify);
+                        _ = try func.buildPointerOffset(offset, elem_size, .modify);
                     }
                 }
 
                 break :result_value result;
             },
-            .Vector => return self.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
+            .Vector => return func.fail("TODO: Wasm backend: implement airAggregateInit for vectors", .{}),
             else => unreachable,
         }
     };
-    self.finishAir(inst, result, &.{});
+    func.finishAir(inst, result, &.{});
 }
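
The by-ref path walks a single moving pointer instead of computing a fresh store offset per element. Schematically, under the simplifying assumption that every element occupies exactly `elem_size` bytes:

    const std = @import("std");

    fn initByRef(base: [*]u8, elems: []const [*]const u8, elem_size: usize) void {
        var offset: usize = 0;
        for (elems) |elem| {
            std.mem.copy(u8, base[offset .. offset + elem_size], elem[0..elem_size]);
            offset += elem_size; // buildPointerOffset(.modify) above
        }
    }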
 
-fn airUnionInit(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{extra.init});
+fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.UnionInit, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.init});
 
     const result = result: {
-        const union_ty = self.air.typeOfIndex(inst);
-        const layout = union_ty.unionGetLayout(self.target);
+        const union_ty = func.air.typeOfIndex(inst);
+        const layout = union_ty.unionGetLayout(func.target);
         if (layout.payload_size == 0) {
             if (layout.tag_size == 0) {
                 break :result WValue{ .none = {} };
             }
-            assert(!isByRef(union_ty, self.target));
+            assert(!isByRef(union_ty, func.target));
             break :result WValue{ .imm32 = extra.field_index };
         }
-        assert(isByRef(union_ty, self.target));
+        assert(isByRef(union_ty, func.target));
 
-        const result_ptr = try self.allocStack(union_ty);
-        const payload = try self.resolveInst(extra.init);
+        const result_ptr = try func.allocStack(union_ty);
+        const payload = try func.resolveInst(extra.init);
         const union_obj = union_ty.cast(Type.Payload.Union).?.data;
         assert(union_obj.haveFieldTypes());
         const field = union_obj.fields.values()[extra.field_index];
 
         if (layout.tag_align >= layout.payload_align) {
-            const payload_ptr = try self.buildPointerOffset(result_ptr, layout.tag_size, .new);
-            try self.store(payload_ptr, payload, field.ty, 0);
+            const payload_ptr = try func.buildPointerOffset(result_ptr, layout.tag_size, .new);
+            try func.store(payload_ptr, payload, field.ty, 0);
         } else {
-            try self.store(result_ptr, payload, field.ty, 0);
+            try func.store(result_ptr, payload, field.ty, 0);
         }
         break :result result_ptr;
     };
 
-    self.finishAir(inst, result, &.{extra.init});
+    func.finishAir(inst, result, &.{extra.init});
 }
 
-fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const prefetch = self.air.instructions.items(.data)[inst].prefetch;
-    self.finishAir(inst, .none, &.{prefetch.ptr});
+fn airPrefetch(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const prefetch = func.air.instructions.items(.data)[inst].prefetch;
+    func.finishAir(inst, .none, &.{prefetch.ptr});
 }
 
-fn airWasmMemorySize(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{pl_op.operand});
+fn airWasmMemorySize(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{pl_op.operand});
 
-    const result = try self.allocLocal(self.air.typeOfIndex(inst));
-    try self.addLabel(.memory_size, pl_op.payload);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{pl_op.operand});
+    const result = try func.allocLocal(func.air.typeOfIndex(inst));
+    try func.addLabel(.memory_size, pl_op.payload);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{pl_op.operand});
 }
 
-fn airWasmMemoryGrow(self: *Self, inst: Air.Inst.Index) !void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{pl_op.operand});
+fn airWasmMemoryGrow(func: *CodeGen, inst: Air.Inst.Index) !void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{pl_op.operand});
 
-    const operand = try self.resolveInst(pl_op.operand);
-    const result = try self.allocLocal(self.air.typeOfIndex(inst));
-    try self.emitWValue(operand);
-    try self.addLabel(.memory_grow, pl_op.payload);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{pl_op.operand});
+    const operand = try func.resolveInst(pl_op.operand);
+    const result = try func.allocLocal(func.air.typeOfIndex(inst));
+    try func.emitWValue(operand);
+    try func.addLabel(.memory_grow, pl_op.payload);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{pl_op.operand});
 }
 
-fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+fn cmpOptionals(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
     assert(operand_ty.hasRuntimeBitsIgnoreComptime());
     assert(op == .eq or op == .neq);
     var buf: Type.Payload.ElemType = undefined;
     const payload_ty = operand_ty.optionalChild(&buf);
-    const offset = @intCast(u32, operand_ty.abiSize(self.target) - payload_ty.abiSize(self.target));
+    const offset = @intCast(u32, operand_ty.abiSize(func.target) - payload_ty.abiSize(func.target));
 
     // We store the final result in here; it is only set to 1
     // when both optionals turn out to be truly equal.
-    var result = try self.ensureAllocLocal(Type.initTag(.i32));
-    defer result.free(self);
-
-    try self.startBlock(.block, wasm.block_empty);
-    _ = try self.isNull(lhs, operand_ty, .i32_eq);
-    _ = try self.isNull(rhs, operand_ty, .i32_eq);
-    try self.addTag(.i32_ne); // inverse so we can exit early
-    try self.addLabel(.br_if, 0);
-
-    _ = try self.load(lhs, payload_ty, offset);
-    _ = try self.load(rhs, payload_ty, offset);
-    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, self.target) });
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
-    try self.addLabel(.br_if, 0);
-
-    try self.addImm32(1);
-    try self.addLabel(.local_set, result.local.value);
-    try self.endBlock();
-
-    try self.emitWValue(result);
-    try self.addImm32(0);
-    try self.addTag(if (op == .eq) .i32_ne else .i32_eq);
+    var result = try func.ensureAllocLocal(Type.initTag(.i32));
+    defer result.free(func);
+
+    try func.startBlock(.block, wasm.block_empty);
+    _ = try func.isNull(lhs, operand_ty, .i32_eq);
+    _ = try func.isNull(rhs, operand_ty, .i32_eq);
+    try func.addTag(.i32_ne); // inverted, so we can exit the block early
+    try func.addLabel(.br_if, 0);
+
+    _ = try func.load(lhs, payload_ty, offset);
+    _ = try func.load(rhs, payload_ty, offset);
+    const opcode = buildOpcode(.{ .op = .ne, .valtype1 = typeToValtype(payload_ty, func.target) });
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try func.addLabel(.br_if, 0);
+
+    try func.addImm32(1);
+    try func.addLabel(.local_set, result.local.value);
+    try func.endBlock();
+
+    try func.emitWValue(result);
+    try func.addImm32(0);
+    try func.addTag(if (op == .eq) .i32_ne else .i32_eq);
     return WValue{ .stack = {} };
 }
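Read as scalar control flow, the block above amounts to the following minimal sketch (`optionalsEqual`, its parameter names, and the `u32` payload type are illustrative assumptions, not code from this file):

    fn optionalsEqual(lhs_null: bool, rhs_null: bool, lhs_pl: u32, rhs_pl: u32) bool {
        var result = false;
        blk: {
            // first br_if: differing null tags mean "not equal", exit early
            if (lhs_null != rhs_null) break :blk;
            // second br_if: differing payloads also exit with result == false
            if (lhs_pl != rhs_pl) break :blk;
            result = true;
        }
        return result; // the trailing i32_ne/i32_eq maps this to eq/neq
    }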
 
 /// Compares two big integers by checking both their high and low bits.
 /// NOTE: Leaves the result of the comparison on top of the stack.
 /// TODO: Lower this to a compiler_rt call when bitsize > 128
-fn cmpBigInt(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
-    assert(operand_ty.abiSize(self.target) >= 16);
+fn cmpBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
+    assert(operand_ty.abiSize(func.target) >= 16);
     assert(!(lhs != .stack and rhs == .stack));
-    if (operand_ty.intInfo(self.target).bits > 128) {
-        return self.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(self.target).bits});
+    if (operand_ty.intInfo(func.target).bits > 128) {
+        return func.fail("TODO: Support cmpBigInt for integer bitsize: '{d}'", .{operand_ty.intInfo(func.target).bits});
     }
 
-    var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
-    defer lhs_high_bit.free(self);
-    var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
-    defer rhs_high_bit.free(self);
+    var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
+    defer lhs_high_bit.free(func);
+    var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
+    defer rhs_high_bit.free(func);
 
     switch (op) {
         .eq, .neq => {
-            const xor_high = try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, .xor);
-            const lhs_low_bit = try self.load(lhs, Type.u64, 8);
-            const rhs_low_bit = try self.load(rhs, Type.u64, 8);
-            const xor_low = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
-            const or_result = try self.binOp(xor_high, xor_low, Type.u64, .@"or");
+            const xor_high = try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, .xor);
+            const lhs_low_bit = try func.load(lhs, Type.u64, 8);
+            const rhs_low_bit = try func.load(rhs, Type.u64, 8);
+            const xor_low = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
+            const or_result = try func.binOp(xor_high, xor_low, Type.u64, .@"or");
 
             switch (op) {
-                .eq => return self.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq),
-                .neq => return self.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq),
+                .eq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .eq),
+                .neq => return func.cmp(or_result, .{ .imm64 = 0 }, Type.u64, .neq),
                 else => unreachable,
             }
         },
         else => {
             const ty = if (operand_ty.isSignedInt()) Type.i64 else Type.u64;
             // leave these values on top of the stack for '.select'
-            const lhs_low_bit = try self.load(lhs, Type.u64, 8);
-            const rhs_low_bit = try self.load(rhs, Type.u64, 8);
-            _ = try self.cmp(lhs_low_bit, rhs_low_bit, ty, op);
-            _ = try self.cmp(lhs_high_bit, rhs_high_bit, ty, op);
-            _ = try self.cmp(lhs_high_bit, rhs_high_bit, ty, .eq);
-            try self.addTag(.select);
+            const lhs_low_bit = try func.load(lhs, Type.u64, 8);
+            const rhs_low_bit = try func.load(rhs, Type.u64, 8);
+            _ = try func.cmp(lhs_low_bit, rhs_low_bit, ty, op);
+            _ = try func.cmp(lhs_high_bit, rhs_high_bit, ty, op);
+            _ = try func.cmp(lhs_high_bit, rhs_high_bit, ty, .eq);
+            try func.addTag(.select);
         },
     }
 
     return WValue{ .stack = {} };
 }
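The `select` sequence in the ordered branch encodes the textbook two-word comparison. A minimal scalar sketch, using `<` as the example operator (names are illustrative):

    fn cmpU128Lt(lhs_hi: u64, lhs_lo: u64, rhs_hi: u64, rhs_lo: u64) bool {
        // select keeps the low-half comparison when the high halves are equal,
        // otherwise the high-half comparison decides the ordering
        return if (lhs_hi == rhs_hi) lhs_lo < rhs_lo else lhs_hi < rhs_hi;
    }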
 
-fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const un_ty = self.air.typeOf(bin_op.lhs).childType();
-    const tag_ty = self.air.typeOf(bin_op.rhs);
-    const layout = un_ty.unionGetLayout(self.target);
-    if (layout.tag_size == 0) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airSetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    const un_ty = func.air.typeOf(bin_op.lhs).childType();
+    const tag_ty = func.air.typeOf(bin_op.rhs);
+    const layout = un_ty.unionGetLayout(func.target);
+    if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const union_ptr = try self.resolveInst(bin_op.lhs);
-    const new_tag = try self.resolveInst(bin_op.rhs);
+    const union_ptr = try func.resolveInst(bin_op.lhs);
+    const new_tag = try func.resolveInst(bin_op.rhs);
     if (layout.payload_size == 0) {
-        try self.store(union_ptr, new_tag, tag_ty, 0);
-        return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+        try func.store(union_ptr, new_tag, tag_ty, 0);
+        return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
     }
 
     // when the tag alignment is smaller than the payload's, the tag will be stored
@@ -4367,54 +4365,54 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
     const offset = if (layout.tag_align < layout.payload_align) blk: {
         break :blk @intCast(u32, layout.payload_size);
     } else @as(u32, 0);
-    try self.store(union_ptr, new_tag, tag_ty, offset);
-    self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    try func.store(union_ptr, new_tag, tag_ty, offset);
+    func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 }
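The tag offset used above reduces to a small helper; a sketch, with assumed parameter names:

    fn tagOffset(tag_align: u32, payload_align: u32, payload_size: u32) u32 {
        // the tag trails the payload only when the payload is more strictly aligned
        return if (tag_align < payload_align) payload_size else 0;
    }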
 
-fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airGetUnionTag(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const un_ty = self.air.typeOf(ty_op.operand);
-    const tag_ty = self.air.typeOfIndex(inst);
-    const layout = un_ty.unionGetLayout(self.target);
-    if (layout.tag_size == 0) return self.finishAir(inst, .none, &.{ty_op.operand});
+    const un_ty = func.air.typeOf(ty_op.operand);
+    const tag_ty = func.air.typeOfIndex(inst);
+    const layout = un_ty.unionGetLayout(func.target);
+    if (layout.tag_size == 0) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
     // when the tag alignment is smaller than the payload's, the tag will be
     // stored after the payload.
     const offset = if (layout.tag_align < layout.payload_align) blk: {
         break :blk @intCast(u32, layout.payload_size);
     } else @as(u32, 0);
-    const tag = try self.load(operand, tag_ty, offset);
-    const result = try tag.toLocal(self, tag_ty);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const tag = try func.load(operand, tag_ty, offset);
+    const result = try tag.toLocal(func, tag_ty);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airFpext(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airFpext(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const dest_ty = self.air.typeOfIndex(inst);
-    const operand = try self.resolveInst(ty_op.operand);
-    const extended = try self.fpext(operand, self.air.typeOf(ty_op.operand), dest_ty);
-    const result = try extended.toLocal(self, dest_ty);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const dest_ty = func.air.typeOfIndex(inst);
+    const operand = try func.resolveInst(ty_op.operand);
+    const extended = try func.fpext(operand, func.air.typeOf(ty_op.operand), dest_ty);
+    const result = try extended.toLocal(func, dest_ty);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Extends a float from a given `Type` to a larger wanted `Type`
 /// NOTE: Leaves the result on the stack
-fn fpext(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_bits = given.floatBits(self.target);
-    const wanted_bits = wanted.floatBits(self.target);
+fn fpext(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_bits = given.floatBits(func.target);
+    const wanted_bits = wanted.floatBits(func.target);
 
     if (wanted_bits == 64 and given_bits == 32) {
-        try self.emitWValue(operand);
-        try self.addTag(.f64_promote_f32);
+        try func.emitWValue(operand);
+        try func.addTag(.f64_promote_f32);
         return WValue{ .stack = {} };
     } else if (given_bits == 16) {
         // call __extendhfsf2(f16) f32
-        const f32_result = try self.callIntrinsic(
+        const f32_result = try func.callIntrinsic(
             "__extendhfsf2",
             &.{Type.f16},
             Type.f32,
@@ -4425,162 +4423,162 @@ fn fpext(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WVa
             return f32_result;
         }
         if (wanted_bits == 64) {
-            try self.addTag(.f64_promote_f32);
+            try func.addTag(.f64_promote_f32);
             return WValue{ .stack = {} };
         }
-        return self.fail("TODO: Implement 'fpext' for floats with bitsize: {d}", .{wanted_bits});
+        return func.fail("TODO: Implement 'fpext' for floats with bitsize: {d}", .{wanted_bits});
     } else {
         // TODO: Emit a call to compiler-rt to extend the float. e.g. __extendhfsf2
-        return self.fail("TODO: Implement 'fpext' for floats with bitsize: {d}", .{wanted_bits});
+        return func.fail("TODO: Implement 'fpext' for floats with bitsize: {d}", .{wanted_bits});
     }
 }
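In scalar terms the promotion paths look like this sketch, where `@floatCast` stands in for `f64.promote_f32` and for the `__extendhfsf2` call, and `Wanted` is an assumed comptime parameter:

    fn extendFloat(x: f16, comptime Wanted: type) Wanted {
        const as_f32 = @floatCast(f32, x); // __extendhfsf2: f16 -> f32
        return @floatCast(Wanted, as_f32); // f64.promote_f32 when Wanted == f64
    }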
 
-fn airFptrunc(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airFptrunc(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const dest_ty = self.air.typeOfIndex(inst);
-    const operand = try self.resolveInst(ty_op.operand);
-    const trunc = try self.fptrunc(operand, self.air.typeOf(ty_op.operand), dest_ty);
-    const result = try trunc.toLocal(self, dest_ty);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const dest_ty = func.air.typeOfIndex(inst);
+    const operand = try func.resolveInst(ty_op.operand);
+    const trunc = try func.fptrunc(operand, func.air.typeOf(ty_op.operand), dest_ty);
+    const result = try trunc.toLocal(func, dest_ty);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
 /// Truncates a float from a given `Type` to a smaller wanted `Type`
 /// NOTE: The result value remains on the stack
-fn fptrunc(self: *Self, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
-    const given_bits = given.floatBits(self.target);
-    const wanted_bits = wanted.floatBits(self.target);
+fn fptrunc(func: *CodeGen, operand: WValue, given: Type, wanted: Type) InnerError!WValue {
+    const given_bits = given.floatBits(func.target);
+    const wanted_bits = wanted.floatBits(func.target);
 
     if (wanted_bits == 32 and given_bits == 64) {
-        try self.emitWValue(operand);
-        try self.addTag(.f32_demote_f64);
+        try func.emitWValue(operand);
+        try func.addTag(.f32_demote_f64);
         return WValue{ .stack = {} };
     } else if (wanted_bits == 16) {
         const op: WValue = if (given_bits == 64) blk: {
-            try self.emitWValue(operand);
-            try self.addTag(.f32_demote_f64);
+            try func.emitWValue(operand);
+            try func.addTag(.f32_demote_f64);
             break :blk WValue{ .stack = {} };
         } else operand;
 
         // call __truncsfhf2(f32) f16
-        return self.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op});
+        return func.callIntrinsic("__truncsfhf2", &.{Type.f32}, Type.f16, &.{op});
     } else {
         // TODO: Emit a call to compiler-rt to trunc the float. e.g. __truncdfhf2
-        return self.fail("TODO: Implement 'fptrunc' for floats with bitsize: {d}", .{wanted_bits});
+        return func.fail("TODO: Implement 'fptrunc' for floats with bitsize: {d}", .{wanted_bits});
     }
 }
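This is the mirror image of `fpext`: wider sources are first demoted to `f32` so a single compiler-rt helper, `__truncsfhf2(f32) f16`, covers both. A rough sketch:

    fn truncToF16(x: f64) f16 {
        const as_f32 = @floatCast(f32, x); // f32.demote_f64
        return @floatCast(f16, as_f32); // stands in for __truncsfhf2
    }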
 
-fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airErrUnionPayloadPtrSet(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const err_set_ty = self.air.typeOf(ty_op.operand).childType();
+    const err_set_ty = func.air.typeOf(ty_op.operand).childType();
     const payload_ty = err_set_ty.errorUnionPayload();
-    const operand = try self.resolveInst(ty_op.operand);
+    const operand = try func.resolveInst(ty_op.operand);
 
     // set the error tag to '0' to mark the error union as non-error
-    try self.store(
+    try func.store(
         operand,
         .{ .imm32 = 0 },
         Type.anyerror,
-        @intCast(u32, errUnionErrorOffset(payload_ty, self.target)),
+        @intCast(u32, errUnionErrorOffset(payload_ty, func.target)),
     );
 
     const result = result: {
-        if (self.liveness.isUnused(inst)) break :result WValue{ .none = {} };
+        if (func.liveness.isUnused(inst)) break :result WValue{ .none = {} };
 
         if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
-            break :result self.reuseOperand(ty_op.operand, operand);
+            break :result func.reuseOperand(ty_op.operand, operand);
         }
 
-        break :result try self.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, self.target)), .new);
+        break :result try func.buildPointerOffset(operand, @intCast(u32, errUnionPayloadOffset(payload_ty, func.target)), .new);
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{extra.field_ptr});
+fn airFieldParentPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{extra.field_ptr});
 
-    const field_ptr = try self.resolveInst(extra.field_ptr);
-    const struct_ty = self.air.getRefType(ty_pl.ty).childType();
-    const field_offset = struct_ty.structFieldOffset(extra.field_index, self.target);
+    const field_ptr = try func.resolveInst(extra.field_ptr);
+    const struct_ty = func.air.getRefType(ty_pl.ty).childType();
+    const field_offset = struct_ty.structFieldOffset(extra.field_index, func.target);
 
     const result = if (field_offset != 0) result: {
-        const base = try self.buildPointerOffset(field_ptr, 0, .new);
-        try self.addLabel(.local_get, base.local.value);
-        try self.addImm32(@bitCast(i32, @intCast(u32, field_offset)));
-        try self.addTag(.i32_sub);
-        try self.addLabel(.local_set, base.local.value);
+        const base = try func.buildPointerOffset(field_ptr, 0, .new);
+        try func.addLabel(.local_get, base.local.value);
+        try func.addImm32(@bitCast(i32, @intCast(u32, field_offset)));
+        try func.addTag(.i32_sub);
+        try func.addLabel(.local_set, base.local.value);
         break :result base;
-    } else self.reuseOperand(extra.field_ptr, field_ptr);
+    } else func.reuseOperand(extra.field_ptr, field_ptr);
 
-    self.finishAir(inst, result, &.{extra.field_ptr});
+    func.finishAir(inst, result, &.{extra.field_ptr});
 }
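The `i32_sub` sequence above is plain pointer arithmetic; a wasm32 sketch with assumed names:

    fn fieldParentPtr(field_ptr: u32, field_offset: u32) u32 {
        // parent base = field address - field offset within the struct
        return field_ptr - field_offset;
    }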
 
-fn airMemcpy(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const bin_op = self.air.extraData(Air.Bin, pl_op.payload).data;
-    const dst = try self.resolveInst(pl_op.operand);
-    const src = try self.resolveInst(bin_op.lhs);
-    const len = try self.resolveInst(bin_op.rhs);
-    try self.memcpy(dst, src, len);
+fn airMemcpy(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
+    const dst = try func.resolveInst(pl_op.operand);
+    const src = try func.resolveInst(bin_op.lhs);
+    const len = try func.resolveInst(bin_op.rhs);
+    try func.memcpy(dst, src, len);
 
-    self.finishAir(inst, .none, &.{pl_op.operand});
+    func.finishAir(inst, .none, &.{pl_op.operand});
 }
 
-fn airPopcount(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airPopcount(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const op_ty = self.air.typeOf(ty_op.operand);
-    const result_ty = self.air.typeOfIndex(inst);
+    const operand = try func.resolveInst(ty_op.operand);
+    const op_ty = func.air.typeOf(ty_op.operand);
+    const result_ty = func.air.typeOfIndex(inst);
 
     if (op_ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: Implement @popCount for vectors", .{});
+        return func.fail("TODO: Implement @popCount for vectors", .{});
     }
 
-    const int_info = op_ty.intInfo(self.target);
+    const int_info = op_ty.intInfo(func.target);
     const bits = int_info.bits;
     const wasm_bits = toWasmBits(bits) orelse {
-        return self.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
+        return func.fail("TODO: Implement @popCount for integers with bitsize '{d}'", .{bits});
     };
 
     switch (wasm_bits) {
         128 => {
-            _ = try self.load(operand, Type.u64, 0);
-            try self.addTag(.i64_popcnt);
-            _ = try self.load(operand, Type.u64, 8);
-            try self.addTag(.i64_popcnt);
-            try self.addTag(.i64_add);
-            try self.addTag(.i32_wrap_i64);
+            _ = try func.load(operand, Type.u64, 0);
+            try func.addTag(.i64_popcnt);
+            _ = try func.load(operand, Type.u64, 8);
+            try func.addTag(.i64_popcnt);
+            try func.addTag(.i64_add);
+            try func.addTag(.i32_wrap_i64);
         },
         else => {
-            try self.emitWValue(operand);
+            try func.emitWValue(operand);
             switch (wasm_bits) {
-                32 => try self.addTag(.i32_popcnt),
+                32 => try func.addTag(.i32_popcnt),
                 64 => {
-                    try self.addTag(.i64_popcnt);
-                    try self.addTag(.i32_wrap_i64);
+                    try func.addTag(.i64_popcnt);
+                    try func.addTag(.i32_wrap_i64);
                 },
                 else => unreachable,
             }
         },
     }
 
-    const result = try self.allocLocal(result_ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const result = try func.allocLocal(result_ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
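For the 128-bit case the lowering sums the popcounts of the two 64-bit halves. A scalar sketch, using the two-argument `@popCount` form consistent with the other builtins in this file:

    fn popCount128(lo: u64, hi: u64) u32 {
        // i64.popcnt on each half, i64.add, then i32.wrap_i64
        return @as(u32, @popCount(u64, lo)) + @popCount(u64, hi);
    }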
 
-fn airErrorName(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{un_op});
+fn airErrorName(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
 
-    const operand = try self.resolveInst(un_op);
+    const operand = try func.resolveInst(un_op);
     // First retrieve the symbol index of the error name table,
     // which is used to emit a relocation for the pointer
     // to that table.
@@ -4592,63 +4590,63 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) InnerError!void {
     //
     // As the names are global and the slice elements are constant, we do not have
     // to make a copy of the ptr+value but can point to them directly.
-    const error_table_symbol = try self.bin_file.getErrorTableSymbol();
+    const error_table_symbol = try func.bin_file.getErrorTableSymbol();
     const name_ty = Type.initTag(.const_slice_u8_sentinel_0);
-    const abi_size = name_ty.abiSize(self.target);
+    const abi_size = name_ty.abiSize(func.target);
 
     const error_name_value: WValue = .{ .memory = error_table_symbol }; // emitting this will create a relocation
-    try self.emitWValue(error_name_value);
-    try self.emitWValue(operand);
-    switch (self.arch()) {
+    try func.emitWValue(error_name_value);
+    try func.emitWValue(operand);
+    switch (func.arch()) {
         .wasm32 => {
-            try self.addImm32(@bitCast(i32, @intCast(u32, abi_size)));
-            try self.addTag(.i32_mul);
-            try self.addTag(.i32_add);
+            try func.addImm32(@bitCast(i32, @intCast(u32, abi_size)));
+            try func.addTag(.i32_mul);
+            try func.addTag(.i32_add);
         },
         .wasm64 => {
-            try self.addImm64(abi_size);
-            try self.addTag(.i64_mul);
-            try self.addTag(.i64_add);
+            try func.addImm64(abi_size);
+            try func.addTag(.i64_mul);
+            try func.addTag(.i64_add);
         },
         else => unreachable,
     }
 
-    const result_ptr = try self.allocLocal(Type.usize);
-    try self.addLabel(.local_set, result_ptr.local.value);
-    self.finishAir(inst, result_ptr, &.{un_op});
+    const result_ptr = try func.allocLocal(Type.usize);
+    try func.addLabel(.local_set, result_ptr.local.value);
+    func.finishAir(inst, result_ptr, &.{un_op});
 }
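The address computation above is a straight table lookup; a wasm32 sketch, where `table` is assumed to be the relocated address of the error name table:

    fn errorNameEntryPtr(table: u32, err_value: u32, abi_size: u32) u32 {
        // each entry is one slice (ptr + len), abi_size bytes apart
        return table + err_value * abi_size;
    }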
 
-fn airPtrSliceFieldPtr(self: *Self, inst: Air.Inst.Index, offset: u32) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
-    const slice_ptr = try self.resolveInst(ty_op.operand);
-    const result = try self.buildPointerOffset(slice_ptr, offset, .new);
-    self.finishAir(inst, result, &.{ty_op.operand});
+fn airPtrSliceFieldPtr(func: *CodeGen, inst: Air.Inst.Index, offset: u32) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
+    const slice_ptr = try func.resolveInst(ty_op.operand);
+    const result = try func.buildPointerOffset(slice_ptr, offset, .new);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airAddSubWithOverflow(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
 
-    const lhs_op = try self.resolveInst(extra.lhs);
-    const rhs_op = try self.resolveInst(extra.rhs);
-    const lhs_ty = self.air.typeOf(extra.lhs);
+    const lhs_op = try func.resolveInst(extra.lhs);
+    const rhs_op = try func.resolveInst(extra.rhs);
+    const lhs_ty = func.air.typeOf(extra.lhs);
 
     if (lhs_ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: Implement overflow arithmetic for vectors", .{});
+        return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = lhs_ty.intInfo(self.target);
+    const int_info = lhs_ty.intInfo(func.target);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return self.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
+        return func.fail("TODO: Implement {{add/sub}}_with_overflow for integer bitsize: {d}", .{int_info.bits});
     };
 
     if (wasm_bits == 128) {
-        const result = try self.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, self.air.typeOfIndex(inst), op);
-        return self.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
+        const result = try func.addSubWithOverflowBigInt(lhs_op, rhs_op, lhs_ty, func.air.typeOfIndex(inst), op);
+        return func.finishAir(inst, result, &.{ extra.lhs, extra.rhs });
     }
 
     const zero = switch (wasm_bits) {
@@ -4660,10 +4658,10 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!v
     // for signed integers, we first apply signed shifts by the difference in bits
     // to recover the signed value, as we store it internally as 2's complement.
     var lhs = if (wasm_bits != int_info.bits and is_signed) blk: {
-        break :blk try (try self.signAbsValue(lhs_op, lhs_ty)).toLocal(self, lhs_ty);
+        break :blk try (try func.signAbsValue(lhs_op, lhs_ty)).toLocal(func, lhs_ty);
     } else lhs_op;
     var rhs = if (wasm_bits != int_info.bits and is_signed) blk: {
-        break :blk try (try self.signAbsValue(rhs_op, lhs_ty)).toLocal(self, lhs_ty);
+        break :blk try (try func.signAbsValue(rhs_op, lhs_ty)).toLocal(func, lhs_ty);
     } else rhs_op;
 
     // in this case, we performed a signAbsValue which created a temporary local
@@ -4671,178 +4669,178 @@ fn airAddSubWithOverflow(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!v
     // In the other case we must not free them, because that would free the
     // resolved operands, which may still be referenced by other instructions.
     defer if (wasm_bits != int_info.bits and is_signed) {
-        lhs.free(self);
-        rhs.free(self);
+        lhs.free(func);
+        rhs.free(func);
     };
 
-    var bin_op = try (try self.binOp(lhs, rhs, lhs_ty, op)).toLocal(self, lhs_ty);
-    defer bin_op.free(self);
+    var bin_op = try (try func.binOp(lhs, rhs, lhs_ty, op)).toLocal(func, lhs_ty);
+    defer bin_op.free(func);
     var result = if (wasm_bits != int_info.bits) blk: {
-        break :blk try (try self.wrapOperand(bin_op, lhs_ty)).toLocal(self, lhs_ty);
+        break :blk try (try func.wrapOperand(bin_op, lhs_ty)).toLocal(func, lhs_ty);
     } else bin_op;
-    defer result.free(self); // no-op when wasm_bits == int_info.bits
+    defer result.free(func); // no-op when wasm_bits == int_info.bits
 
     const cmp_op: std.math.CompareOperator = if (op == .sub) .gt else .lt;
     const overflow_bit: WValue = if (is_signed) blk: {
         if (wasm_bits == int_info.bits) {
-            const cmp_zero = try self.cmp(rhs, zero, lhs_ty, cmp_op);
-            const lt = try self.cmp(bin_op, lhs, lhs_ty, .lt);
-            break :blk try self.binOp(cmp_zero, lt, Type.u32, .xor);
+            const cmp_zero = try func.cmp(rhs, zero, lhs_ty, cmp_op);
+            const lt = try func.cmp(bin_op, lhs, lhs_ty, .lt);
+            break :blk try func.binOp(cmp_zero, lt, Type.u32, .xor);
         }
-        const abs = try self.signAbsValue(bin_op, lhs_ty);
-        break :blk try self.cmp(abs, bin_op, lhs_ty, .neq);
+        const abs = try func.signAbsValue(bin_op, lhs_ty);
+        break :blk try func.cmp(abs, bin_op, lhs_ty, .neq);
     } else if (wasm_bits == int_info.bits)
-        try self.cmp(bin_op, lhs, lhs_ty, cmp_op)
+        try func.cmp(bin_op, lhs, lhs_ty, cmp_op)
     else
-        try self.cmp(bin_op, result, lhs_ty, .neq);
-    var overflow_local = try overflow_bit.toLocal(self, Type.u32);
-    defer overflow_local.free(self);
+        try func.cmp(bin_op, result, lhs_ty, .neq);
+    var overflow_local = try overflow_bit.toLocal(func, Type.u32);
+    defer overflow_local.free(func);
 
-    const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
-    try self.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(self.target));
-    try self.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    try func.store(result_ptr, result, lhs_ty, 0);
+    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
 
-    self.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
+    func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
 
-fn addSubWithOverflowBigInt(self: *Self, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
+fn addSubWithOverflowBigInt(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type, result_ty: Type, op: Op) InnerError!WValue {
     assert(op == .add or op == .sub);
-    const int_info = ty.intInfo(self.target);
+    const int_info = ty.intInfo(func.target);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits != 128) {
-        return self.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
+        return func.fail("TODO: Implement @{{add/sub}}WithOverflow for integer bitsize '{d}'", .{int_info.bits});
     }
 
-    var lhs_high_bit = try (try self.load(lhs, Type.u64, 0)).toLocal(self, Type.u64);
-    defer lhs_high_bit.free(self);
-    var lhs_low_bit = try (try self.load(lhs, Type.u64, 8)).toLocal(self, Type.u64);
-    defer lhs_low_bit.free(self);
-    var rhs_high_bit = try (try self.load(rhs, Type.u64, 0)).toLocal(self, Type.u64);
-    defer rhs_high_bit.free(self);
-    var rhs_low_bit = try (try self.load(rhs, Type.u64, 8)).toLocal(self, Type.u64);
-    defer rhs_low_bit.free(self);
+    var lhs_high_bit = try (try func.load(lhs, Type.u64, 0)).toLocal(func, Type.u64);
+    defer lhs_high_bit.free(func);
+    var lhs_low_bit = try (try func.load(lhs, Type.u64, 8)).toLocal(func, Type.u64);
+    defer lhs_low_bit.free(func);
+    var rhs_high_bit = try (try func.load(rhs, Type.u64, 0)).toLocal(func, Type.u64);
+    defer rhs_high_bit.free(func);
+    var rhs_low_bit = try (try func.load(rhs, Type.u64, 8)).toLocal(func, Type.u64);
+    defer rhs_low_bit.free(func);
 
-    var low_op_res = try (try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op)).toLocal(self, Type.u64);
-    defer low_op_res.free(self);
-    var high_op_res = try (try self.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(self, Type.u64);
-    defer high_op_res.free(self);
+    var low_op_res = try (try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, op)).toLocal(func, Type.u64);
+    defer low_op_res.free(func);
+    var high_op_res = try (try func.binOp(lhs_high_bit, rhs_high_bit, Type.u64, op)).toLocal(func, Type.u64);
+    defer high_op_res.free(func);
 
     var lt = if (op == .add) blk: {
-        break :blk try (try self.cmp(high_op_res, lhs_high_bit, Type.u64, .lt)).toLocal(self, Type.u32);
+        break :blk try (try func.cmp(high_op_res, lhs_high_bit, Type.u64, .lt)).toLocal(func, Type.u32);
     } else if (op == .sub) blk: {
-        break :blk try (try self.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt)).toLocal(self, Type.u32);
+        break :blk try (try func.cmp(lhs_high_bit, rhs_high_bit, Type.u64, .lt)).toLocal(func, Type.u32);
     } else unreachable;
-    defer lt.free(self);
-    var tmp = try (try self.intcast(lt, Type.u32, Type.u64)).toLocal(self, Type.u64);
-    defer tmp.free(self);
-    var tmp_op = try (try self.binOp(low_op_res, tmp, Type.u64, op)).toLocal(self, Type.u64);
-    defer tmp_op.free(self);
+    defer lt.free(func);
+    var tmp = try (try func.intcast(lt, Type.u32, Type.u64)).toLocal(func, Type.u64);
+    defer tmp.free(func);
+    var tmp_op = try (try func.binOp(low_op_res, tmp, Type.u64, op)).toLocal(func, Type.u64);
+    defer tmp_op.free(func);
 
     const overflow_bit = if (is_signed) blk: {
-        const xor_low = try self.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
+        const xor_low = try func.binOp(lhs_low_bit, rhs_low_bit, Type.u64, .xor);
         const to_wrap = if (op == .add) wrap: {
-            break :wrap try self.binOp(xor_low, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
+            break :wrap try func.binOp(xor_low, .{ .imm64 = ~@as(u64, 0) }, Type.u64, .xor);
         } else xor_low;
-        const xor_op = try self.binOp(lhs_low_bit, tmp_op, Type.u64, .xor);
-        const wrap = try self.binOp(to_wrap, xor_op, Type.u64, .@"and");
-        break :blk try self.cmp(wrap, .{ .imm64 = 0 }, Type.i64, .lt); // i64 because signed
+        const xor_op = try func.binOp(lhs_low_bit, tmp_op, Type.u64, .xor);
+        const wrap = try func.binOp(to_wrap, xor_op, Type.u64, .@"and");
+        break :blk try func.cmp(wrap, .{ .imm64 = 0 }, Type.i64, .lt); // i64 because signed
     } else blk: {
         const first_arg = if (op == .sub) arg: {
-            break :arg try self.cmp(high_op_res, lhs_high_bit, Type.u64, .gt);
+            break :arg try func.cmp(high_op_res, lhs_high_bit, Type.u64, .gt);
         } else lt;
 
-        try self.emitWValue(first_arg);
-        _ = try self.cmp(tmp_op, lhs_low_bit, Type.u64, if (op == .add) .lt else .gt);
-        _ = try self.cmp(tmp_op, lhs_low_bit, Type.u64, .eq);
-        try self.addTag(.select);
+        try func.emitWValue(first_arg);
+        _ = try func.cmp(tmp_op, lhs_low_bit, Type.u64, if (op == .add) .lt else .gt);
+        _ = try func.cmp(tmp_op, lhs_low_bit, Type.u64, .eq);
+        try func.addTag(.select);
 
         break :blk WValue{ .stack = {} };
     };
-    var overflow_local = try overflow_bit.toLocal(self, Type.initTag(.u1));
-    defer overflow_local.free(self);
+    var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+    defer overflow_local.free(func);
 
-    const result_ptr = try self.allocStack(result_ty);
-    try self.store(result_ptr, high_op_res, Type.u64, 0);
-    try self.store(result_ptr, tmp_op, Type.u64, 8);
-    try self.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
+    const result_ptr = try func.allocStack(result_ty);
+    try func.store(result_ptr, high_op_res, Type.u64, 0);
+    try func.store(result_ptr, tmp_op, Type.u64, 8);
+    try func.store(result_ptr, overflow_local, Type.initTag(.u1), 16);
 
     return result_ptr;
 }
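For the unsigned `add` case, the carry and overflow logic above reduces to this sketch (the signed case uses the sign-xor rule instead and is omitted here; names are illustrative):

    fn addU128(lhs_lo: u64, lhs_hi: u64, rhs_lo: u64, rhs_hi: u64) struct { lo: u64, hi: u64, overflow: bool } {
        const lo = lhs_lo +% rhs_lo;
        const carry: u64 = @boolToInt(lo < lhs_lo); // did the low half wrap?
        const hi = lhs_hi +% rhs_hi +% carry;
        // the select: the low-half carry decides when the high halves compare
        // equal, otherwise a wrapped high half means overflow
        const overflow = if (hi == lhs_hi) lo < lhs_lo else hi < lhs_hi;
        return .{ .lo = lo, .hi = hi, .overflow = overflow };
    }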
 
-fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
+fn airShlWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
 
-    const lhs = try self.resolveInst(extra.lhs);
-    const rhs = try self.resolveInst(extra.rhs);
-    const lhs_ty = self.air.typeOf(extra.lhs);
+    const lhs = try func.resolveInst(extra.lhs);
+    const rhs = try func.resolveInst(extra.rhs);
+    const lhs_ty = func.air.typeOf(extra.lhs);
 
     if (lhs_ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: Implement overflow arithmetic for vectors", .{});
+        return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
-    const int_info = lhs_ty.intInfo(self.target);
+    const int_info = lhs_ty.intInfo(func.target);
     const is_signed = int_info.signedness == .signed;
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return self.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
+        return func.fail("TODO: Implement shl_with_overflow for integer bitsize: {d}", .{int_info.bits});
     };
 
-    var shl = try (try self.binOp(lhs, rhs, lhs_ty, .shl)).toLocal(self, lhs_ty);
-    defer shl.free(self);
+    var shl = try (try func.binOp(lhs, rhs, lhs_ty, .shl)).toLocal(func, lhs_ty);
+    defer shl.free(func);
     var result = if (wasm_bits != int_info.bits) blk: {
-        break :blk try (try self.wrapOperand(shl, lhs_ty)).toLocal(self, lhs_ty);
+        break :blk try (try func.wrapOperand(shl, lhs_ty)).toLocal(func, lhs_ty);
     } else shl;
-    defer result.free(self); // it's a no-op to free the same local twice (when wasm_bits == int_info.bits)
+    defer result.free(func); // it's a no-op to free the same local twice (when wasm_bits == int_info.bits)
 
     const overflow_bit = if (wasm_bits != int_info.bits and is_signed) blk: {
         // emit lhs to the stack so we can also keep 'wrapped' on the stack
-        try self.emitWValue(lhs);
-        const abs = try self.signAbsValue(shl, lhs_ty);
-        const wrapped = try self.wrapBinOp(abs, rhs, lhs_ty, .shr);
-        break :blk try self.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq);
+        try func.emitWValue(lhs);
+        const abs = try func.signAbsValue(shl, lhs_ty);
+        const wrapped = try func.wrapBinOp(abs, rhs, lhs_ty, .shr);
+        break :blk try func.cmp(.{ .stack = {} }, wrapped, lhs_ty, .neq);
     } else blk: {
-        try self.emitWValue(lhs);
-        const shr = try self.binOp(result, rhs, lhs_ty, .shr);
-        break :blk try self.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
+        try func.emitWValue(lhs);
+        const shr = try func.binOp(result, rhs, lhs_ty, .shr);
+        break :blk try func.cmp(.{ .stack = {} }, shr, lhs_ty, .neq);
     };
-    var overflow_local = try overflow_bit.toLocal(self, Type.initTag(.u1));
-    defer overflow_local.free(self);
+    var overflow_local = try overflow_bit.toLocal(func, Type.initTag(.u1));
+    defer overflow_local.free(func);
 
-    const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
-    try self.store(result_ptr, result, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(self.target));
-    try self.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
+    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    try func.store(result_ptr, result, lhs_ty, 0);
+    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    try func.store(result_ptr, overflow_local, Type.initTag(.u1), offset);
 
-    self.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
+    func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
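The overflow check above shifts the (possibly wrapped) result back and compares it against the original operand. A scalar sketch for `u32`:

    fn shlU32Overflow(lhs: u32, rhs: u5) struct { result: u32, overflow: bool } {
        const result = lhs << rhs;
        // if shifting back does not reproduce lhs, bits were shifted out
        return .{ .result = result, .overflow = (result >> rhs) != lhs };
    }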
 
-fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
+fn airMulWithOverflow(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.Bin, ty_pl.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ extra.lhs, extra.rhs });
 
-    const lhs = try self.resolveInst(extra.lhs);
-    const rhs = try self.resolveInst(extra.rhs);
-    const lhs_ty = self.air.typeOf(extra.lhs);
+    const lhs = try func.resolveInst(extra.lhs);
+    const rhs = try func.resolveInst(extra.rhs);
+    const lhs_ty = func.air.typeOf(extra.lhs);
 
     if (lhs_ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: Implement overflow arithmetic for vectors", .{});
+        return func.fail("TODO: Implement overflow arithmetic for vectors", .{});
     }
 
     // We store the overflow bit in this local. As it is zero-initialized,
     // we only need to update it when an overflow (or underflow) occurs.
-    var overflow_bit = try self.ensureAllocLocal(Type.initTag(.u1));
-    defer overflow_bit.free(self);
+    var overflow_bit = try func.ensureAllocLocal(Type.initTag(.u1));
+    defer overflow_bit.free(func);
 
-    const int_info = lhs_ty.intInfo(self.target);
+    const int_info = lhs_ty.intInfo(func.target);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return self.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits});
+        return func.fail("TODO: Implement overflow arithmetic for integer bitsize: {d}", .{int_info.bits});
     };
 
     if (wasm_bits > 32) {
-        return self.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
+        return func.fail("TODO: Implement `@mulWithOverflow` for integer bitsize: {d}", .{int_info.bits});
     }
 
     const zero = switch (wasm_bits) {
@@ -4854,190 +4852,190 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) InnerError!void {
     // for 32-bit integers we upcast to a 64-bit integer
     const bin_op = if (int_info.bits == 32) blk: {
         const new_ty = if (int_info.signedness == .signed) Type.i64 else Type.u64;
-        const lhs_upcast = try self.intcast(lhs, lhs_ty, new_ty);
-        const rhs_upcast = try self.intcast(rhs, lhs_ty, new_ty);
-        const bin_op = try (try self.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(self, new_ty);
+        const lhs_upcast = try func.intcast(lhs, lhs_ty, new_ty);
+        const rhs_upcast = try func.intcast(rhs, lhs_ty, new_ty);
+        const bin_op = try (try func.binOp(lhs_upcast, rhs_upcast, new_ty, .mul)).toLocal(func, new_ty);
         if (int_info.signedness == .unsigned) {
-            const shr = try self.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
-            const wrap = try self.intcast(shr, new_ty, lhs_ty);
-            _ = try self.cmp(wrap, zero, lhs_ty, .neq);
-            try self.addLabel(.local_set, overflow_bit.local.value);
-            break :blk try self.intcast(bin_op, new_ty, lhs_ty);
+            const shr = try func.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
+            const wrap = try func.intcast(shr, new_ty, lhs_ty);
+            _ = try func.cmp(wrap, zero, lhs_ty, .neq);
+            try func.addLabel(.local_set, overflow_bit.local.value);
+            break :blk try func.intcast(bin_op, new_ty, lhs_ty);
         } else {
-            const down_cast = try (try self.intcast(bin_op, new_ty, lhs_ty)).toLocal(self, lhs_ty);
-            var shr = try (try self.binOp(down_cast, .{ .imm32 = int_info.bits - 1 }, lhs_ty, .shr)).toLocal(self, lhs_ty);
-            defer shr.free(self);
-
-            const shr_res = try self.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
-            const down_shr_res = try self.intcast(shr_res, new_ty, lhs_ty);
-            _ = try self.cmp(down_shr_res, shr, lhs_ty, .neq);
-            try self.addLabel(.local_set, overflow_bit.local.value);
+            const down_cast = try (try func.intcast(bin_op, new_ty, lhs_ty)).toLocal(func, lhs_ty);
+            var shr = try (try func.binOp(down_cast, .{ .imm32 = int_info.bits - 1 }, lhs_ty, .shr)).toLocal(func, lhs_ty);
+            defer shr.free(func);
+
+            const shr_res = try func.binOp(bin_op, .{ .imm64 = int_info.bits }, new_ty, .shr);
+            const down_shr_res = try func.intcast(shr_res, new_ty, lhs_ty);
+            _ = try func.cmp(down_shr_res, shr, lhs_ty, .neq);
+            try func.addLabel(.local_set, overflow_bit.local.value);
             break :blk down_cast;
         }
     } else if (int_info.signedness == .signed) blk: {
-        const lhs_abs = try self.signAbsValue(lhs, lhs_ty);
-        const rhs_abs = try self.signAbsValue(rhs, lhs_ty);
-        const bin_op = try (try self.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(self, lhs_ty);
-        const mul_abs = try self.signAbsValue(bin_op, lhs_ty);
-        _ = try self.cmp(mul_abs, bin_op, lhs_ty, .neq);
-        try self.addLabel(.local_set, overflow_bit.local.value);
-        break :blk try self.wrapOperand(bin_op, lhs_ty);
+        const lhs_abs = try func.signAbsValue(lhs, lhs_ty);
+        const rhs_abs = try func.signAbsValue(rhs, lhs_ty);
+        const bin_op = try (try func.binOp(lhs_abs, rhs_abs, lhs_ty, .mul)).toLocal(func, lhs_ty);
+        const mul_abs = try func.signAbsValue(bin_op, lhs_ty);
+        _ = try func.cmp(mul_abs, bin_op, lhs_ty, .neq);
+        try func.addLabel(.local_set, overflow_bit.local.value);
+        break :blk try func.wrapOperand(bin_op, lhs_ty);
     } else blk: {
-        var bin_op = try (try self.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(self, lhs_ty);
-        defer bin_op.free(self);
+        var bin_op = try (try func.binOp(lhs, rhs, lhs_ty, .mul)).toLocal(func, lhs_ty);
+        defer bin_op.free(func);
         const shift_imm = if (wasm_bits == 32)
             WValue{ .imm32 = int_info.bits }
         else
             WValue{ .imm64 = int_info.bits };
-        const shr = try self.binOp(bin_op, shift_imm, lhs_ty, .shr);
-        _ = try self.cmp(shr, zero, lhs_ty, .neq);
-        try self.addLabel(.local_set, overflow_bit.local.value);
-        break :blk try self.wrapOperand(bin_op, lhs_ty);
+        const shr = try func.binOp(bin_op, shift_imm, lhs_ty, .shr);
+        _ = try func.cmp(shr, zero, lhs_ty, .neq);
+        try func.addLabel(.local_set, overflow_bit.local.value);
+        break :blk try func.wrapOperand(bin_op, lhs_ty);
     };
-    var bin_op_local = try bin_op.toLocal(self, lhs_ty);
-    defer bin_op_local.free(self);
+    var bin_op_local = try bin_op.toLocal(func, lhs_ty);
+    defer bin_op_local.free(func);
 
-    const result_ptr = try self.allocStack(self.air.typeOfIndex(inst));
-    try self.store(result_ptr, bin_op_local, lhs_ty, 0);
-    const offset = @intCast(u32, lhs_ty.abiSize(self.target));
-    try self.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
+    const result_ptr = try func.allocStack(func.air.typeOfIndex(inst));
+    try func.store(result_ptr, bin_op_local, lhs_ty, 0);
+    const offset = @intCast(u32, lhs_ty.abiSize(func.target));
+    try func.store(result_ptr, overflow_bit, Type.initTag(.u1), offset);
 
-    self.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
+    func.finishAir(inst, result_ptr, &.{ extra.lhs, extra.rhs });
 }
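For 32-bit unsigned operands the lowering above is the usual upcast-and-check; a sketch:

    fn mulU32Overflow(lhs: u32, rhs: u32) struct { result: u32, overflow: bool } {
        const wide = @as(u64, lhs) * @as(u64, rhs);
        return .{
            .result = @truncate(u32, wide), // the wrapped product
            .overflow = (wide >> 32) != 0, // any high bits mean overflow
        };
    }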
 
-fn airMaxMin(self: *Self, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airMaxMin(func: *CodeGen, inst: Air.Inst.Index, op: enum { max, min }) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ty = self.air.typeOfIndex(inst);
+    const ty = func.air.typeOfIndex(inst);
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
+        return func.fail("TODO: `@maximum` and `@minimum` for vectors", .{});
     }
 
-    if (ty.abiSize(self.target) > 16) {
-        return self.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
+    if (ty.abiSize(func.target) > 16) {
+        return func.fail("TODO: `@maximum` and `@minimum` for types larger than 16 bytes", .{});
     }
 
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
 
     // operands to select from
-    try self.lowerToStack(lhs);
-    try self.lowerToStack(rhs);
-    _ = try self.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
+    try func.lowerToStack(lhs);
+    try func.lowerToStack(rhs);
+    _ = try func.cmp(lhs, rhs, ty, if (op == .max) .gt else .lt);
 
     // based on the comparison result, select operand 0 or 1.
-    try self.addTag(.select);
+    try func.addTag(.select);
 
     // store result in local
-    const result_ty = if (isByRef(ty, self.target)) Type.u32 else ty;
-    const result = try self.allocLocal(result_ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result_ty = if (isByRef(ty, func.target)) Type.u32 else ty;
+    const result = try func.allocLocal(result_ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
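The stack discipline above (push both operands, push the comparison, then `select`) is the classic branchless max/min; in scalar form:

    fn max32(lhs: i32, rhs: i32) i32 {
        // wasm: lhs rhs (lhs > rhs) select -- keeps lhs when the flag is non-zero
        return if (lhs > rhs) lhs else rhs;
    }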
 
-fn airMulAdd(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const bin_op = self.air.extraData(Air.Bin, pl_op.payload).data;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airMulAdd(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const bin_op = func.air.extraData(Air.Bin, pl_op.payload).data;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ty = self.air.typeOfIndex(inst);
+    const ty = func.air.typeOfIndex(inst);
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: `@mulAdd` for vectors", .{});
+        return func.fail("TODO: `@mulAdd` for vectors", .{});
     }
 
-    const addend = try self.resolveInst(pl_op.operand);
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
+    const addend = try func.resolveInst(pl_op.operand);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
 
-    const result = if (ty.floatBits(self.target) == 16) fl_result: {
-        const rhs_ext = try self.fpext(rhs, ty, Type.f32);
-        const lhs_ext = try self.fpext(lhs, ty, Type.f32);
-        const addend_ext = try self.fpext(addend, ty, Type.f32);
+    const result = if (ty.floatBits(func.target) == 16) fl_result: {
+        const rhs_ext = try func.fpext(rhs, ty, Type.f32);
+        const lhs_ext = try func.fpext(lhs, ty, Type.f32);
+        const addend_ext = try func.fpext(addend, ty, Type.f32);
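+        // wasm has no native f16 arithmetic, so widen the operands to f32,
+        // fuse through compiler-rt, and truncate the result back to f16.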
         // call to compiler-rt `fn fmaf(f32, f32, f32) f32`
-        var result = try self.callIntrinsic(
+        var result = try func.callIntrinsic(
             "fmaf",
             &.{ Type.f32, Type.f32, Type.f32 },
             Type.f32,
             &.{ rhs_ext, lhs_ext, addend_ext },
         );
-        break :fl_result try (try self.fptrunc(result, Type.f32, ty)).toLocal(self, ty);
+        break :fl_result try (try func.fptrunc(result, Type.f32, ty)).toLocal(func, ty);
     } else result: {
-        const mul_result = try self.binOp(lhs, rhs, ty, .mul);
-        break :result try (try self.binOp(mul_result, addend, ty, .add)).toLocal(self, ty);
+        const mul_result = try func.binOp(lhs, rhs, ty, .mul);
+        break :result try (try func.binOp(mul_result, addend, ty, .add)).toLocal(func, ty);
     };
 
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airClz(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airClz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const ty = self.air.typeOf(ty_op.operand);
-    const result_ty = self.air.typeOfIndex(inst);
+    const ty = func.air.typeOf(ty_op.operand);
+    const result_ty = func.air.typeOfIndex(inst);
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: `@clz` for vectors", .{});
+        return func.fail("TODO: `@clz` for vectors", .{});
     }
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(self.target);
+    const operand = try func.resolveInst(ty_op.operand);
+    const int_info = ty.intInfo(func.target);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return self.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
+        return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
 
     switch (wasm_bits) {
         32 => {
-            try self.emitWValue(operand);
-            try self.addTag(.i32_clz);
+            try func.emitWValue(operand);
+            try func.addTag(.i32_clz);
         },
         64 => {
-            try self.emitWValue(operand);
-            try self.addTag(.i64_clz);
-            try self.addTag(.i32_wrap_i64);
+            try func.emitWValue(operand);
+            try func.addTag(.i64_clz);
+            try func.addTag(.i32_wrap_i64);
         },
         128 => {
-            var lsb = try (try self.load(operand, Type.u64, 8)).toLocal(self, Type.u64);
-            defer lsb.free(self);
-
-            try self.emitWValue(lsb);
-            try self.addTag(.i64_clz);
-            _ = try self.load(operand, Type.u64, 0);
-            try self.addTag(.i64_clz);
-            try self.emitWValue(.{ .imm64 = 64 });
-            try self.addTag(.i64_add);
-            _ = try self.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
-            try self.addTag(.select);
-            try self.addTag(.i32_wrap_i64);
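+            // for 128-bit integers, clz is the clz of the upper 64 bits when
+            // those are non-zero, else 64 plus the clz of the lower 64 bits.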
+            var lsb = try (try func.load(operand, Type.u64, 8)).toLocal(func, Type.u64);
+            defer lsb.free(func);
+
+            try func.emitWValue(lsb);
+            try func.addTag(.i64_clz);
+            _ = try func.load(operand, Type.u64, 0);
+            try func.addTag(.i64_clz);
+            try func.emitWValue(.{ .imm64 = 64 });
+            try func.addTag(.i64_add);
+            _ = try func.cmp(lsb, .{ .imm64 = 0 }, Type.u64, .neq);
+            try func.addTag(.select);
+            try func.addTag(.i32_wrap_i64);
         },
         else => unreachable,
     }
 
     if (wasm_bits != int_info.bits) {
-        try self.emitWValue(.{ .imm32 = wasm_bits - int_info.bits });
-        try self.addTag(.i32_sub);
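+        // the padding bits above the integer's width are zero and were
+        // counted as leading zeroes, so subtract the padding width.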
+        try func.emitWValue(.{ .imm32 = wasm_bits - int_info.bits });
+        try func.addTag(.i32_sub);
     }
 
-    const result = try self.allocLocal(result_ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const result = try func.allocLocal(result_ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airCtz(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const ty = self.air.typeOf(ty_op.operand);
-    const result_ty = self.air.typeOfIndex(inst);
+    const ty = func.air.typeOf(ty_op.operand);
+    const result_ty = func.air.typeOfIndex(inst);
 
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: `@ctz` for vectors", .{});
+        return func.fail("TODO: `@ctz` for vectors", .{});
     }
 
-    const operand = try self.resolveInst(ty_op.operand);
-    const int_info = ty.intInfo(self.target);
+    const operand = try func.resolveInst(ty_op.operand);
+    const int_info = ty.intInfo(func.target);
     const wasm_bits = toWasmBits(int_info.bits) orelse {
-        return self.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
+        return func.fail("TODO: `@clz` for integers with bitsize '{d}'", .{int_info.bits});
     };
 
     switch (wasm_bits) {
@@ -5045,63 +5043,63 @@ fn airCtz(self: *Self, inst: Air.Inst.Index) InnerError!void {
             if (wasm_bits != int_info.bits) {
                 const val: u32 = @as(u32, 1) << @intCast(u5, int_info.bits);
                 // leave value on the stack
-                _ = try self.binOp(operand, .{ .imm32 = val }, ty, .@"or");
-            } else try self.emitWValue(operand);
-            try self.addTag(.i32_ctz);
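+                // set the bit just above the integer's width so that ctz of
+                // an all-zero value caps at the type's bit count.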
+                _ = try func.binOp(operand, .{ .imm32 = val }, ty, .@"or");
+            } else try func.emitWValue(operand);
+            try func.addTag(.i32_ctz);
         },
         64 => {
             if (wasm_bits != int_info.bits) {
                 const val: u64 = @as(u64, 1) << @intCast(u6, int_info.bits);
                 // leave value on the stack
-                _ = try self.binOp(operand, .{ .imm64 = val }, ty, .@"or");
-            } else try self.emitWValue(operand);
-            try self.addTag(.i64_ctz);
-            try self.addTag(.i32_wrap_i64);
+                _ = try func.binOp(operand, .{ .imm64 = val }, ty, .@"or");
+            } else try func.emitWValue(operand);
+            try func.addTag(.i64_ctz);
+            try func.addTag(.i32_wrap_i64);
         },
         128 => {
-            var msb = try (try self.load(operand, Type.u64, 0)).toLocal(self, Type.u64);
-            defer msb.free(self);
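+            // for 128-bit integers, ctz is the ctz of the lower 64 bits when
+            // those are non-zero, else 64 plus the ctz of the upper 64 bits.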
+            var msb = try (try func.load(operand, Type.u64, 0)).toLocal(func, Type.u64);
+            defer msb.free(func);
 
-            try self.emitWValue(msb);
-            try self.addTag(.i64_ctz);
-            _ = try self.load(operand, Type.u64, 8);
+            try func.emitWValue(msb);
+            try func.addTag(.i64_ctz);
+            _ = try func.load(operand, Type.u64, 8);
             if (wasm_bits != int_info.bits) {
-                try self.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64));
-                try self.addTag(.i64_or);
+                try func.addImm64(@as(u64, 1) << @intCast(u6, int_info.bits - 64));
+                try func.addTag(.i64_or);
             }
-            try self.addTag(.i64_ctz);
-            try self.addImm64(64);
+            try func.addTag(.i64_ctz);
+            try func.addImm64(64);
             if (wasm_bits != int_info.bits) {
-                try self.addTag(.i64_or);
+                try func.addTag(.i64_or);
             } else {
-                try self.addTag(.i64_add);
+                try func.addTag(.i64_add);
             }
-            _ = try self.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
-            try self.addTag(.select);
-            try self.addTag(.i32_wrap_i64);
+            _ = try func.cmp(msb, .{ .imm64 = 0 }, Type.u64, .neq);
+            try func.addTag(.select);
+            try func.addTag(.i32_wrap_i64);
         },
         else => unreachable,
     }
 
-    const result = try self.allocLocal(result_ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ty_op.operand});
+    const result = try func.allocLocal(result_ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airDbgVar(self: *Self, inst: Air.Inst.Index, is_ptr: bool) !void {
-    if (self.debug_output != .dwarf) return self.finishAir(inst, .none, &.{});
+fn airDbgVar(func: *CodeGen, inst: Air.Inst.Index, is_ptr: bool) !void {
+    if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
 
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const ty = self.air.typeOf(pl_op.operand);
-    const operand = try self.resolveInst(pl_op.operand);
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const ty = func.air.typeOf(pl_op.operand);
+    const operand = try func.resolveInst(pl_op.operand);
     const op_ty = if (is_ptr) ty.childType() else ty;
 
     log.debug("airDbgVar: %{d}: {}, {}", .{ inst, op_ty.fmtDebug(), operand });
 
-    const name = self.air.nullTerminatedString(pl_op.payload);
+    const name = func.air.nullTerminatedString(pl_op.payload);
     log.debug(" var name = ({s})", .{name});
 
-    const dbg_info = &self.debug_output.dwarf.dbg_info;
+    const dbg_info = &func.debug_output.dwarf.dbg_info;
     try dbg_info.append(@enumToInt(link.File.Dwarf.AbbrevKind.variable));
     switch (operand) {
         .local => |local| {
@@ -5123,54 +5121,54 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index, is_ptr: bool) !void {
     }
 
     try dbg_info.ensureUnusedCapacity(5 + name.len + 1);
-    try self.addDbgInfoTypeReloc(op_ty);
+    try func.addDbgInfoTypeReloc(op_ty);
     dbg_info.appendSliceAssumeCapacity(name);
     dbg_info.appendAssumeCapacity(0);
-    self.finishAir(inst, .none, &.{});
+    func.finishAir(inst, .none, &.{});
 }
 
-fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
-    if (self.debug_output != .dwarf) return self.finishAir(inst, .none, &.{});
+fn airDbgStmt(func: *CodeGen, inst: Air.Inst.Index) !void {
+    if (func.debug_output != .dwarf) return func.finishAir(inst, .none, &.{});
 
-    const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
-    try self.addInst(.{ .tag = .dbg_line, .data = .{
-        .payload = try self.addExtra(Mir.DbgLineColumn{
+    const dbg_stmt = func.air.instructions.items(.data)[inst].dbg_stmt;
+    try func.addInst(.{ .tag = .dbg_line, .data = .{
+        .payload = try func.addExtra(Mir.DbgLineColumn{
             .line = dbg_stmt.line,
             .column = dbg_stmt.column,
         }),
     } });
-    self.finishAir(inst, .none, &.{});
+    func.finishAir(inst, .none, &.{});
 }
 
-fn airTry(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
-    const err_union = try self.resolveInst(pl_op.operand);
-    const extra = self.air.extraData(Air.Try, pl_op.payload);
-    const body = self.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = self.air.typeOf(pl_op.operand);
-    const result = try lowerTry(self, err_union, body, err_union_ty, false);
-    self.finishAir(inst, result, &.{pl_op.operand});
+fn airTry(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const pl_op = func.air.instructions.items(.data)[inst].pl_op;
+    const err_union = try func.resolveInst(pl_op.operand);
+    const extra = func.air.extraData(Air.Try, pl_op.payload);
+    const body = func.air.extra[extra.end..][0..extra.data.body_len];
+    const err_union_ty = func.air.typeOf(pl_op.operand);
+    const result = try lowerTry(func, err_union, body, err_union_ty, false);
+    func.finishAir(inst, result, &.{pl_op.operand});
 }
 
-fn airTryPtr(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
-    const extra = self.air.extraData(Air.TryPtr, ty_pl.payload);
-    const err_union_ptr = try self.resolveInst(extra.data.ptr);
-    const body = self.air.extra[extra.end..][0..extra.data.body_len];
-    const err_union_ty = self.air.typeOf(extra.data.ptr).childType();
-    const result = try lowerTry(self, err_union_ptr, body, err_union_ty, true);
-    self.finishAir(inst, result, &.{extra.data.ptr});
+fn airTryPtr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_pl = func.air.instructions.items(.data)[inst].ty_pl;
+    const extra = func.air.extraData(Air.TryPtr, ty_pl.payload);
+    const err_union_ptr = try func.resolveInst(extra.data.ptr);
+    const body = func.air.extra[extra.end..][0..extra.data.body_len];
+    const err_union_ty = func.air.typeOf(extra.data.ptr).childType();
+    const result = try lowerTry(func, err_union_ptr, body, err_union_ty, true);
+    func.finishAir(inst, result, &.{extra.data.ptr});
 }
 
 fn lowerTry(
-    self: *Self,
+    func: *CodeGen,
     err_union: WValue,
     body: []const Air.Inst.Index,
     err_union_ty: Type,
     operand_is_ptr: bool,
 ) InnerError!WValue {
     if (operand_is_ptr) {
-        return self.fail("TODO: lowerTry for pointers", .{});
+        return func.fail("TODO: lowerTry for pointers", .{});
     }
 
     const pl_ty = err_union_ty.errorUnionPayload();
@@ -5178,21 +5176,21 @@ fn lowerTry(
 
     if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
         // Block we can jump out of when error is not set
-        try self.startBlock(.block, wasm.block_empty);
+        try func.startBlock(.block, wasm.block_empty);
 
         // check if the error tag is set for the error union.
-        try self.emitWValue(err_union);
+        try func.emitWValue(err_union);
         if (pl_has_bits) {
-            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, self.target));
-            try self.addMemArg(.i32_load16_u, .{
+            const err_offset = @intCast(u32, errUnionErrorOffset(pl_ty, func.target));
+            try func.addMemArg(.i32_load16_u, .{
                 .offset = err_union.offset() + err_offset,
-                .alignment = Type.anyerror.abiAlignment(self.target),
+                .alignment = Type.anyerror.abiAlignment(func.target),
             });
         }
-        try self.addTag(.i32_eqz);
-        try self.addLabel(.br_if, 0); // jump out of block when error is '0'
-        try self.genBody(body);
-        try self.endBlock();
+        try func.addTag(.i32_eqz);
+        try func.addLabel(.br_if, 0); // jump out of block when error is '0'
+        try func.genBody(body);
+        try func.endBlock();
     }
 
     // if we reach here it means error was not set, and we want the payload
@@ -5200,121 +5198,121 @@ fn lowerTry(
         return WValue{ .none = {} };
     }
 
-    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, self.target));
-    if (isByRef(pl_ty, self.target)) {
-        return buildPointerOffset(self, err_union, pl_offset, .new);
+    const pl_offset = @intCast(u32, errUnionPayloadOffset(pl_ty, func.target));
+    if (isByRef(pl_ty, func.target)) {
+        return buildPointerOffset(func, err_union, pl_offset, .new);
     }
-    const payload = try self.load(err_union, pl_ty, pl_offset);
-    return payload.toLocal(self, pl_ty);
+    const payload = try func.load(err_union, pl_ty, pl_offset);
+    return payload.toLocal(func, pl_ty);
 }
 
-fn airByteSwap(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const ty_op = self.air.instructions.items(.data)[inst].ty_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ty_op.operand});
+fn airByteSwap(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const ty_op = func.air.instructions.items(.data)[inst].ty_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ty_op.operand});
 
-    const ty = self.air.typeOfIndex(inst);
-    const operand = try self.resolveInst(ty_op.operand);
+    const ty = func.air.typeOfIndex(inst);
+    const operand = try func.resolveInst(ty_op.operand);
 
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: @byteSwap for vectors", .{});
+        return func.fail("TODO: @byteSwap for vectors", .{});
     }
-    const int_info = ty.intInfo(self.target);
+    const int_info = ty.intInfo(func.target);
 
     // swapping a single byte is a no-op
     if (int_info.bits == 8) {
-        return self.finishAir(inst, self.reuseOperand(ty_op.operand, operand), &.{ty_op.operand});
+        return func.finishAir(inst, func.reuseOperand(ty_op.operand, operand), &.{ty_op.operand});
     }
 
     const result = result: {
         switch (int_info.bits) {
             16 => {
-                const shl_res = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shl);
-                const lhs = try self.binOp(shl_res, .{ .imm32 = 0xFF00 }, ty, .@"and");
-                const shr_res = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shr);
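+                // swap both bytes: ((x << 8) & 0xFF00) | (x >> 8), re-wrapping
+                // the shifted-down byte for signed integers.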
+                const shl_res = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shl);
+                const lhs = try func.binOp(shl_res, .{ .imm32 = 0xFF00 }, ty, .@"and");
+                const shr_res = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shr);
                 const res = if (int_info.signedness == .signed) blk: {
-                    break :blk try self.wrapOperand(shr_res, Type.u8);
+                    break :blk try func.wrapOperand(shr_res, Type.u8);
                 } else shr_res;
-                break :result try (try self.binOp(lhs, res, ty, .@"or")).toLocal(self, ty);
+                break :result try (try func.binOp(lhs, res, ty, .@"or")).toLocal(func, ty);
             },
             24 => {
-                var msb = try (try self.wrapOperand(operand, Type.u16)).toLocal(self, Type.u16);
-                defer msb.free(self);
+                var msb = try (try func.wrapOperand(operand, Type.u16)).toLocal(func, Type.u16);
+                defer msb.free(func);
 
-                const shl_res = try self.binOp(msb, .{ .imm32 = 8 }, Type.u16, .shl);
-                const lhs = try self.binOp(shl_res, .{ .imm32 = 0xFF0000 }, Type.u16, .@"and");
-                const shr_res = try self.binOp(msb, .{ .imm32 = 8 }, ty, .shr);
+                const shl_res = try func.binOp(msb, .{ .imm32 = 8 }, Type.u16, .shl);
+                const lhs = try func.binOp(shl_res, .{ .imm32 = 0xFF0000 }, Type.u16, .@"and");
+                const shr_res = try func.binOp(msb, .{ .imm32 = 8 }, ty, .shr);
 
                 const res = if (int_info.signedness == .signed) blk: {
-                    break :blk try self.wrapOperand(shr_res, Type.u8);
+                    break :blk try func.wrapOperand(shr_res, Type.u8);
                 } else shr_res;
-                const lhs_tmp = try self.binOp(lhs, res, ty, .@"or");
-                const lhs_result = try self.binOp(lhs_tmp, .{ .imm32 = 8 }, ty, .shr);
-                const rhs_wrap = try self.wrapOperand(msb, Type.u8);
-                const rhs_result = try self.binOp(rhs_wrap, .{ .imm32 = 16 }, ty, .shl);
-
-                const lsb = try self.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr);
-                const tmp = try self.binOp(lhs_result, rhs_result, ty, .@"or");
-                break :result try (try self.binOp(tmp, lsb, ty, .@"or")).toLocal(self, ty);
+                const lhs_tmp = try func.binOp(lhs, res, ty, .@"or");
+                const lhs_result = try func.binOp(lhs_tmp, .{ .imm32 = 8 }, ty, .shr);
+                const rhs_wrap = try func.wrapOperand(msb, Type.u8);
+                const rhs_result = try func.binOp(rhs_wrap, .{ .imm32 = 16 }, ty, .shl);
+
+                const lsb = try func.wrapBinOp(operand, .{ .imm32 = 16 }, Type.u8, .shr);
+                const tmp = try func.binOp(lhs_result, rhs_result, ty, .@"or");
+                break :result try (try func.binOp(tmp, lsb, ty, .@"or")).toLocal(func, ty);
             },
             32 => {
-                const shl_tmp = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shl);
-                var lhs = try (try self.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, ty, .@"and")).toLocal(self, ty);
-                defer lhs.free(self);
-                const shr_tmp = try self.binOp(operand, .{ .imm32 = 8 }, ty, .shr);
-                var rhs = try (try self.binOp(shr_tmp, .{ .imm32 = 0xFF00FF }, ty, .@"and")).toLocal(self, ty);
-                defer rhs.free(self);
-                var tmp_or = try (try self.binOp(lhs, rhs, ty, .@"or")).toLocal(self, ty);
-                defer tmp_or.free(self);
-
-                const shl = try self.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shl);
-                const shr = try self.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shr);
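+                // swap the bytes within each 16-bit half, then swap the two
+                // halves via a pair of 16-bit shifts.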
+                const shl_tmp = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shl);
+                var lhs = try (try func.binOp(shl_tmp, .{ .imm32 = 0xFF00FF00 }, ty, .@"and")).toLocal(func, ty);
+                defer lhs.free(func);
+                const shr_tmp = try func.binOp(operand, .{ .imm32 = 8 }, ty, .shr);
+                var rhs = try (try func.binOp(shr_tmp, .{ .imm32 = 0xFF00FF }, ty, .@"and")).toLocal(func, ty);
+                defer rhs.free(func);
+                var tmp_or = try (try func.binOp(lhs, rhs, ty, .@"or")).toLocal(func, ty);
+                defer tmp_or.free(func);
+
+                const shl = try func.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shl);
+                const shr = try func.binOp(tmp_or, .{ .imm32 = 16 }, ty, .shr);
                 const res = if (int_info.signedness == .signed) blk: {
-                    break :blk try self.wrapOperand(shr, Type.u16);
+                    break :blk try func.wrapOperand(shr, Type.u16);
                 } else shr;
-                break :result try (try self.binOp(shl, res, ty, .@"or")).toLocal(self, ty);
+                break :result try (try func.binOp(shl, res, ty, .@"or")).toLocal(func, ty);
             },
-            else => return self.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
+            else => return func.fail("TODO: @byteSwap for integers with bitsize {d}", .{int_info.bits}),
         }
     };
-    self.finishAir(inst, result, &.{ty_op.operand});
+    func.finishAir(inst, result, &.{ty_op.operand});
 }
 
-fn airDiv(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airDiv(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ty = self.air.typeOfIndex(inst);
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
+    const ty = func.air.typeOfIndex(inst);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
 
     const result = if (ty.isSignedInt())
-        try self.divSigned(lhs, rhs, ty)
+        try func.divSigned(lhs, rhs, ty)
     else
-        try (try self.binOp(lhs, rhs, ty, .div)).toLocal(self, ty);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+        try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airDivFloor(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ty = self.air.typeOfIndex(inst);
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
+    const ty = func.air.typeOfIndex(inst);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
 
     if (ty.isUnsignedInt()) {
-        const result = try (try self.binOp(lhs, rhs, ty, .div)).toLocal(self, ty);
-        return self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+        const result = try (try func.binOp(lhs, rhs, ty, .div)).toLocal(func, ty);
+        return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
     } else if (ty.isSignedInt()) {
-        const int_bits = ty.intInfo(self.target).bits;
+        const int_bits = ty.intInfo(func.target).bits;
         const wasm_bits = toWasmBits(int_bits) orelse {
-            return self.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
+            return func.fail("TODO: `@divFloor` for signed integers larger than '{d}' bits", .{int_bits});
         };
         const lhs_res = if (wasm_bits != int_bits) blk: {
-            break :blk try (try self.signAbsValue(lhs, ty)).toLocal(self, ty);
+            break :blk try (try func.signAbsValue(lhs, ty)).toLocal(func, ty);
         } else lhs;
         const rhs_res = if (wasm_bits != int_bits) blk: {
-            break :blk try (try self.signAbsValue(rhs, ty)).toLocal(self, ty);
+            break :blk try (try func.signAbsValue(rhs, ty)).toLocal(func, ty);
         } else rhs;
 
         const zero = switch (wasm_bits) {
@@ -5323,118 +5321,118 @@ fn airDivFloor(self: *Self, inst: Air.Inst.Index) InnerError!void {
             else => unreachable,
         };
 
-        const div_result = try self.allocLocal(ty);
+        const div_result = try func.allocLocal(ty);
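+        // floored division: take the truncated quotient and subtract 1 when
+        // the operand signs differ and the remainder is non-zero.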
         // leave on stack
-        _ = try self.binOp(lhs_res, rhs_res, ty, .div);
-        try self.addLabel(.local_tee, div_result.local.value);
-        _ = try self.cmp(lhs_res, zero, ty, .lt);
-        _ = try self.cmp(rhs_res, zero, ty, .lt);
+        _ = try func.binOp(lhs_res, rhs_res, ty, .div);
+        try func.addLabel(.local_tee, div_result.local.value);
+        _ = try func.cmp(lhs_res, zero, ty, .lt);
+        _ = try func.cmp(rhs_res, zero, ty, .lt);
         switch (wasm_bits) {
             32 => {
-                try self.addTag(.i32_xor);
-                try self.addTag(.i32_sub);
+                try func.addTag(.i32_xor);
+                try func.addTag(.i32_sub);
             },
             64 => {
-                try self.addTag(.i64_xor);
-                try self.addTag(.i64_sub);
+                try func.addTag(.i64_xor);
+                try func.addTag(.i64_sub);
             },
             else => unreachable,
         }
-        try self.emitWValue(div_result);
+        try func.emitWValue(div_result);
         // leave value on the stack
-        _ = try self.binOp(lhs_res, rhs_res, ty, .rem);
-        try self.addTag(.select);
+        _ = try func.binOp(lhs_res, rhs_res, ty, .rem);
+        try func.addTag(.select);
     } else {
-        const float_bits = ty.floatBits(self.target);
+        const float_bits = ty.floatBits(func.target);
         if (float_bits > 64) {
-            return self.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
+            return func.fail("TODO: `@divFloor` for floats with bitsize: {d}", .{float_bits});
         }
         const is_f16 = float_bits == 16;
 
         const lhs_operand = if (is_f16) blk: {
-            break :blk try self.fpext(lhs, Type.f16, Type.f32);
+            break :blk try func.fpext(lhs, Type.f16, Type.f32);
         } else lhs;
         const rhs_operand = if (is_f16) blk: {
-            break :blk try self.fpext(rhs, Type.f16, Type.f32);
+            break :blk try func.fpext(rhs, Type.f16, Type.f32);
         } else rhs;
 
-        try self.emitWValue(lhs_operand);
-        try self.emitWValue(rhs_operand);
+        try func.emitWValue(lhs_operand);
+        try func.emitWValue(rhs_operand);
 
         switch (float_bits) {
             16, 32 => {
-                try self.addTag(.f32_div);
-                try self.addTag(.f32_floor);
+                try func.addTag(.f32_div);
+                try func.addTag(.f32_floor);
             },
             64 => {
-                try self.addTag(.f64_div);
-                try self.addTag(.f64_floor);
+                try func.addTag(.f64_div);
+                try func.addTag(.f64_floor);
             },
             else => unreachable,
         }
 
         if (is_f16) {
-            _ = try self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
+            _ = try func.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
         }
     }
 
-    const result = try self.allocLocal(ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    const result = try func.allocLocal(ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn divSigned(self: *Self, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
-    const int_bits = ty.intInfo(self.target).bits;
+fn divSigned(func: *CodeGen, lhs: WValue, rhs: WValue, ty: Type) InnerError!WValue {
+    const int_bits = ty.intInfo(func.target).bits;
     const wasm_bits = toWasmBits(int_bits) orelse {
-        return self.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
+        return func.fail("TODO: Implement signed division for integers with bitsize '{d}'", .{int_bits});
     };
 
     if (wasm_bits == 128) {
-        return self.fail("TODO: Implement signed division for 128-bit integerrs", .{});
+        return func.fail("TODO: Implement signed division for 128-bit integerrs", .{});
     }
 
     if (wasm_bits != int_bits) {
         // Leave both values on the stack
-        _ = try self.signAbsValue(lhs, ty);
-        _ = try self.signAbsValue(rhs, ty);
+        _ = try func.signAbsValue(lhs, ty);
+        _ = try func.signAbsValue(rhs, ty);
     } else {
-        try self.emitWValue(lhs);
-        try self.emitWValue(rhs);
+        try func.emitWValue(lhs);
+        try func.emitWValue(rhs);
     }
-    try self.addTag(.i32_div_s);
+    switch (wasm_bits) {
+        32 => try func.addTag(.i32_div_s),
+        64 => try func.addTag(.i64_div_s),
+        else => unreachable,
+    }
 
-    const result = try self.allocLocal(ty);
-    try self.addLabel(.local_set, result.local.value);
+    const result = try func.allocLocal(ty);
+    try func.addLabel(.local_set, result.local.value);
     return result;
 }
 
 /// Sign-extends the integer's active bits to the full wasm width by
 /// shifting the value left and then arithmetically shifting it right.
 /// NOTE: Leaves the result value on the stack.
-fn signAbsValue(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
-    const int_bits = ty.intInfo(self.target).bits;
+fn signAbsValue(func: *CodeGen, operand: WValue, ty: Type) InnerError!WValue {
+    const int_bits = ty.intInfo(func.target).bits;
     const wasm_bits = toWasmBits(int_bits) orelse {
-        return self.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
+        return func.fail("TODO: signAbsValue for signed integers larger than '{d}' bits", .{int_bits});
     };
 
     const shift_val = switch (wasm_bits) {
         32 => WValue{ .imm32 = wasm_bits - int_bits },
         64 => WValue{ .imm64 = wasm_bits - int_bits },
-        else => return self.fail("TODO: signAbsValue for i128", .{}),
+        else => return func.fail("TODO: signAbsValue for i128", .{}),
     };
 
-    try self.emitWValue(operand);
+    try func.emitWValue(operand);
     switch (wasm_bits) {
         32 => {
-            try self.emitWValue(shift_val);
-            try self.addTag(.i32_shl);
-            try self.emitWValue(shift_val);
-            try self.addTag(.i32_shr_s);
+            try func.emitWValue(shift_val);
+            try func.addTag(.i32_shl);
+            try func.emitWValue(shift_val);
+            try func.addTag(.i32_shr_s);
         },
         64 => {
-            try self.emitWValue(shift_val);
-            try self.addTag(.i64_shl);
-            try self.emitWValue(shift_val);
-            try self.addTag(.i64_shr_s);
+            try func.emitWValue(shift_val);
+            try func.addTag(.i64_shl);
+            try func.emitWValue(shift_val);
+            try func.addTag(.i64_shr_s);
         },
         else => unreachable,
     }
@@ -5442,62 +5440,62 @@ fn signAbsValue(self: *Self, operand: WValue, ty: Type) InnerError!WValue {
     return WValue{ .stack = {} };
 }
 
-fn airCeilFloorTrunc(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
-    const un_op = self.air.instructions.items(.data)[inst].un_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{un_op});
+fn airCeilFloorTrunc(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
+    const un_op = func.air.instructions.items(.data)[inst].un_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{un_op});
 
-    const ty = self.air.typeOfIndex(inst);
-    const float_bits = ty.floatBits(self.target);
+    const ty = func.air.typeOfIndex(inst);
+    const float_bits = ty.floatBits(func.target);
     const is_f16 = float_bits == 16;
 
     if (ty.zigTypeTag() == .Vector) {
-        return self.fail("TODO: Implement `@ceil` for vectors", .{});
+        return func.fail("TODO: Implement `@ceil` for vectors", .{});
     }
     if (float_bits > 64) {
-        return self.fail("TODO: implement `@ceil`, `@trunc`, `@floor` for floats larger than 64bits", .{});
+        return func.fail("TODO: implement `@ceil`, `@trunc`, `@floor` for floats larger than 64bits", .{});
     }
 
-    const operand = try self.resolveInst(un_op);
+    const operand = try func.resolveInst(un_op);
     const op_to_lower = if (is_f16) blk: {
-        break :blk try self.fpext(operand, Type.f16, Type.f32);
+        break :blk try func.fpext(operand, Type.f16, Type.f32);
     } else operand;
-    try self.emitWValue(op_to_lower);
-    const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, self.target) });
-    try self.addTag(Mir.Inst.Tag.fromOpcode(opcode));
+    try func.emitWValue(op_to_lower);
+    const opcode = buildOpcode(.{ .op = op, .valtype1 = typeToValtype(ty, func.target) });
+    try func.addTag(Mir.Inst.Tag.fromOpcode(opcode));
 
     if (is_f16) {
-        _ = try self.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
+        _ = try func.fptrunc(.{ .stack = {} }, Type.f32, Type.f16);
     }
 
-    const result = try self.allocLocal(ty);
-    try self.addLabel(.local_set, result.local.value);
-    self.finishAir(inst, result, &.{un_op});
+    const result = try func.allocLocal(ty);
+    try func.addLabel(.local_set, result.local.value);
+    func.finishAir(inst, result, &.{un_op});
 }
 
-fn airSatBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
+fn airSatBinOp(func: *CodeGen, inst: Air.Inst.Index, op: Op) InnerError!void {
     assert(op == .add or op == .sub);
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ty = self.air.typeOfIndex(inst);
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
+    const ty = func.air.typeOfIndex(inst);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
 
-    const int_info = ty.intInfo(self.target);
+    const int_info = ty.intInfo(func.target);
     const is_signed = int_info.signedness == .signed;
 
     if (int_info.bits > 64) {
-        return self.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits});
+        return func.fail("TODO: saturating arithmetic for integers with bitsize '{d}'", .{int_info.bits});
     }
 
     if (is_signed) {
-        const result = try signedSat(self, lhs, rhs, ty, op);
-        return self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+        const result = try signedSat(func, lhs, rhs, ty, op);
+        return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
     }
 
     const wasm_bits = toWasmBits(int_info.bits).?;
-    var bin_result = try (try self.binOp(lhs, rhs, ty, op)).toLocal(self, ty);
-    defer bin_result.free(self);
+    var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty);
+    defer bin_result.free(func);
     if (wasm_bits != int_info.bits and op == .add) {
         const val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits)) - 1);
         const imm_val = switch (wasm_bits) {
@@ -5506,35 +5504,35 @@ fn airSatBinOp(self: *Self, inst: Air.Inst.Index, op: Op) InnerError!void {
             else => unreachable,
         };
 
-        try self.emitWValue(bin_result);
-        try self.emitWValue(imm_val);
-        _ = try self.cmp(bin_result, imm_val, ty, .lt);
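+        // the widened addition cannot wrap the wasm word, so simply clamp
+        // the result to the type's maximum value.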
+        try func.emitWValue(bin_result);
+        try func.emitWValue(imm_val);
+        _ = try func.cmp(bin_result, imm_val, ty, .lt);
     } else {
         switch (wasm_bits) {
-            32 => try self.addImm32(if (op == .add) @as(i32, -1) else 0),
-            64 => try self.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0),
+            32 => try func.addImm32(if (op == .add) @as(i32, -1) else 0),
+            64 => try func.addImm64(if (op == .add) @bitCast(u64, @as(i64, -1)) else 0),
             else => unreachable,
         }
-        try self.emitWValue(bin_result);
-        _ = try self.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
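+        // detect wraparound (result < lhs after add, result > lhs after sub)
+        // and saturate to all-ones or zero respectively.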
+        try func.emitWValue(bin_result);
+        _ = try func.cmp(bin_result, lhs, ty, if (op == .add) .lt else .gt);
     }
 
-    try self.addTag(.select);
-    const result = try self.allocLocal(ty);
-    try self.addLabel(.local_set, result.local.value);
-    return self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    try func.addTag(.select);
+    const result = try func.allocLocal(ty);
+    try func.addLabel(.local_set, result.local.value);
+    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
-fn signedSat(self: *Self, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
-    const int_info = ty.intInfo(self.target);
+fn signedSat(func: *CodeGen, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op: Op) InnerError!WValue {
+    const int_info = ty.intInfo(func.target);
     const wasm_bits = toWasmBits(int_info.bits).?;
     const is_wasm_bits = wasm_bits == int_info.bits;
 
     var lhs = if (!is_wasm_bits) lhs: {
-        break :lhs try (try self.signAbsValue(lhs_operand, ty)).toLocal(self, ty);
+        break :lhs try (try func.signAbsValue(lhs_operand, ty)).toLocal(func, ty);
     } else lhs_operand;
     var rhs = if (!is_wasm_bits) rhs: {
-        break :rhs try (try self.signAbsValue(rhs_operand, ty)).toLocal(self, ty);
+        break :rhs try (try func.signAbsValue(rhs_operand, ty)).toLocal(func, ty);
     } else rhs_operand;
 
     const max_val: u64 = @intCast(u64, (@as(u65, 1) << @intCast(u7, int_info.bits - 1)) - 1);
@@ -5550,93 +5548,93 @@ fn signedSat(self: *Self, lhs_operand: WValue, rhs_operand: WValue, ty: Type, op
         else => unreachable,
     };
 
-    var bin_result = try (try self.binOp(lhs, rhs, ty, op)).toLocal(self, ty);
+    var bin_result = try (try func.binOp(lhs, rhs, ty, op)).toLocal(func, ty);
     if (!is_wasm_bits) {
-        defer bin_result.free(self); // not returned in this branch
-        defer lhs.free(self); // uses temporary local for absvalue
-        defer rhs.free(self); // uses temporary local for absvalue
-        try self.emitWValue(bin_result);
-        try self.emitWValue(max_wvalue);
-        _ = try self.cmp(bin_result, max_wvalue, ty, .lt);
-        try self.addTag(.select);
-        try self.addLabel(.local_set, bin_result.local.value); // re-use local
-
-        try self.emitWValue(bin_result);
-        try self.emitWValue(min_wvalue);
-        _ = try self.cmp(bin_result, min_wvalue, ty, .gt);
-        try self.addTag(.select);
-        try self.addLabel(.local_set, bin_result.local.value); // re-use local
-        return (try self.wrapOperand(bin_result, ty)).toLocal(self, ty);
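+        // the operation ran at full wasm width: clamp the result to the
+        // narrow type's maximum and minimum, then wrap back to its width.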
+        defer bin_result.free(func); // not returned in this branch
+        defer lhs.free(func); // uses temporary local for absvalue
+        defer rhs.free(func); // uses temporary local for absvalue
+        try func.emitWValue(bin_result);
+        try func.emitWValue(max_wvalue);
+        _ = try func.cmp(bin_result, max_wvalue, ty, .lt);
+        try func.addTag(.select);
+        try func.addLabel(.local_set, bin_result.local.value); // re-use local
+
+        try func.emitWValue(bin_result);
+        try func.emitWValue(min_wvalue);
+        _ = try func.cmp(bin_result, min_wvalue, ty, .gt);
+        try func.addTag(.select);
+        try func.addLabel(.local_set, bin_result.local.value); // re-use local
+        return (try func.wrapOperand(bin_result, ty)).toLocal(func, ty);
     } else {
         const zero = switch (wasm_bits) {
             32 => WValue{ .imm32 = 0 },
             64 => WValue{ .imm64 = 0 },
             else => unreachable,
         };
-        try self.emitWValue(max_wvalue);
-        try self.emitWValue(min_wvalue);
-        _ = try self.cmp(bin_result, zero, ty, .lt);
-        try self.addTag(.select);
-        try self.emitWValue(bin_result);
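+        // pick max or min as the bound from the result's sign, then keep the
+        // bound over the raw result only when signed overflow is detected.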
+        try func.emitWValue(max_wvalue);
+        try func.emitWValue(min_wvalue);
+        _ = try func.cmp(bin_result, zero, ty, .lt);
+        try func.addTag(.select);
+        try func.emitWValue(bin_result);
         // leave on stack
-        const cmp_zero_result = try self.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
-        const cmp_bin_result = try self.cmp(bin_result, lhs, ty, .lt);
-        _ = try self.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
-        try self.addTag(.select);
-        try self.addLabel(.local_set, bin_result.local.value); // re-use local
+        const cmp_zero_result = try func.cmp(rhs, zero, ty, if (op == .add) .lt else .gt);
+        const cmp_bin_result = try func.cmp(bin_result, lhs, ty, .lt);
+        _ = try func.binOp(cmp_zero_result, cmp_bin_result, Type.u32, .xor); // comparisons always return i32, so provide u32 as type to xor.
+        try func.addTag(.select);
+        try func.addLabel(.local_set, bin_result.local.value); // re-use local
         return bin_result;
     }
 }
 
-fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
-    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    if (self.liveness.isUnused(inst)) return self.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
+fn airShlSat(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
+    const bin_op = func.air.instructions.items(.data)[inst].bin_op;
+    if (func.liveness.isUnused(inst)) return func.finishAir(inst, .none, &.{ bin_op.lhs, bin_op.rhs });
 
-    const ty = self.air.typeOfIndex(inst);
-    const int_info = ty.intInfo(self.target);
+    const ty = func.air.typeOfIndex(inst);
+    const int_info = ty.intInfo(func.target);
     const is_signed = int_info.signedness == .signed;
     if (int_info.bits > 64) {
-        return self.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
+        return func.fail("TODO: Saturating shifting left for integers with bitsize '{d}'", .{int_info.bits});
     }
 
-    const lhs = try self.resolveInst(bin_op.lhs);
-    const rhs = try self.resolveInst(bin_op.rhs);
+    const lhs = try func.resolveInst(bin_op.lhs);
+    const rhs = try func.resolveInst(bin_op.rhs);
     const wasm_bits = toWasmBits(int_info.bits).?;
-    const result = try self.allocLocal(ty);
+    const result = try func.allocLocal(ty);
 
     if (wasm_bits == int_info.bits) outer_blk: {
-        var shl = try (try self.binOp(lhs, rhs, ty, .shl)).toLocal(self, ty);
-        defer shl.free(self);
-        var shr = try (try self.binOp(shl, rhs, ty, .shr)).toLocal(self, ty);
-        defer shr.free(self);
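+        // shift left, then shift back right: if the round trip changes the
+        // value the shift overflowed, so we saturate instead.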
+        var shl = try (try func.binOp(lhs, rhs, ty, .shl)).toLocal(func, ty);
+        defer shl.free(func);
+        var shr = try (try func.binOp(shl, rhs, ty, .shr)).toLocal(func, ty);
+        defer shr.free(func);
 
         switch (wasm_bits) {
             32 => blk: {
                 if (!is_signed) {
-                    try self.addImm32(-1);
+                    try func.addImm32(-1);
                     break :blk;
                 }
-                try self.addImm32(std.math.minInt(i32));
-                try self.addImm32(std.math.maxInt(i32));
-                _ = try self.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
-                try self.addTag(.select);
+                try func.addImm32(std.math.minInt(i32));
+                try func.addImm32(std.math.maxInt(i32));
+                _ = try func.cmp(lhs, .{ .imm32 = 0 }, ty, .lt);
+                try func.addTag(.select);
             },
             64 => blk: {
                 if (!is_signed) {
-                    try self.addImm64(@bitCast(u64, @as(i64, -1)));
+                    try func.addImm64(@bitCast(u64, @as(i64, -1)));
                     break :blk;
                 }
-                try self.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
-                try self.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
-                _ = try self.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
-                try self.addTag(.select);
+                try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
+                try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
+                _ = try func.cmp(lhs, .{ .imm64 = 0 }, ty, .lt);
+                try func.addTag(.select);
             },
             else => unreachable,
         }
-        try self.emitWValue(shl);
-        _ = try self.cmp(lhs, shr, ty, .neq);
-        try self.addTag(.select);
-        try self.addLabel(.local_set, result.local.value);
+        try func.emitWValue(shl);
+        _ = try func.cmp(lhs, shr, ty, .neq);
+        try func.addTag(.select);
+        try func.addLabel(.local_set, result.local.value);
         break :outer_blk;
     } else {
         const shift_size = wasm_bits - int_info.bits;
@@ -5646,50 +5644,50 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
             else => unreachable,
         };
 
-        var shl_res = try (try self.binOp(lhs, shift_value, ty, .shl)).toLocal(self, ty);
-        defer shl_res.free(self);
-        var shl = try (try self.binOp(shl_res, rhs, ty, .shl)).toLocal(self, ty);
-        defer shl.free(self);
-        var shr = try (try self.binOp(shl, rhs, ty, .shr)).toLocal(self, ty);
-        defer shr.free(self);
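+        // for non-native widths, first shift the value into the top bits of
+        // the wasm word so the overflow check sees the type's actual range.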
+        var shl_res = try (try func.binOp(lhs, shift_value, ty, .shl)).toLocal(func, ty);
+        defer shl_res.free(func);
+        var shl = try (try func.binOp(shl_res, rhs, ty, .shl)).toLocal(func, ty);
+        defer shl.free(func);
+        var shr = try (try func.binOp(shl, rhs, ty, .shr)).toLocal(func, ty);
+        defer shr.free(func);
 
         switch (wasm_bits) {
             32 => blk: {
                 if (!is_signed) {
-                    try self.addImm32(-1);
+                    try func.addImm32(-1);
                     break :blk;
                 }
 
-                try self.addImm32(std.math.minInt(i32));
-                try self.addImm32(std.math.maxInt(i32));
-                _ = try self.cmp(shl_res, .{ .imm32 = 0 }, ty, .lt);
-                try self.addTag(.select);
+                try func.addImm32(std.math.minInt(i32));
+                try func.addImm32(std.math.maxInt(i32));
+                _ = try func.cmp(shl_res, .{ .imm32 = 0 }, ty, .lt);
+                try func.addTag(.select);
             },
             64 => blk: {
                 if (!is_signed) {
-                    try self.addImm64(@bitCast(u64, @as(i64, -1)));
+                    try func.addImm64(@bitCast(u64, @as(i64, -1)));
                     break :blk;
                 }
 
-                try self.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
-                try self.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
-                _ = try self.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
-                try self.addTag(.select);
+                try func.addImm64(@bitCast(u64, @as(i64, std.math.minInt(i64))));
+                try func.addImm64(@bitCast(u64, @as(i64, std.math.maxInt(i64))));
+                _ = try func.cmp(shl_res, .{ .imm64 = 0 }, ty, .lt);
+                try func.addTag(.select);
             },
             else => unreachable,
         }
-        try self.emitWValue(shl);
-        _ = try self.cmp(shl_res, shr, ty, .neq);
-        try self.addTag(.select);
-        try self.addLabel(.local_set, result.local.value);
-        var shift_result = try self.binOp(result, shift_value, ty, .shr);
+        try func.emitWValue(shl);
+        _ = try func.cmp(shl_res, shr, ty, .neq);
+        try func.addTag(.select);
+        try func.addLabel(.local_set, result.local.value);
+        var shift_result = try func.binOp(result, shift_value, ty, .shr);
         if (is_signed) {
-            shift_result = try self.wrapOperand(shift_result, ty);
+            shift_result = try func.wrapOperand(shift_result, ty);
         }
-        try self.addLabel(.local_set, result.local.value);
+        try func.addLabel(.local_set, result.local.value);
     }
 
-    return self.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
+    return func.finishAir(inst, result, &.{ bin_op.lhs, bin_op.rhs });
 }
 
 /// Calls a compiler-rt intrinsic by creating an undefined symbol,
@@ -5699,29 +5697,29 @@ fn airShlSat(self: *Self, inst: Air.Inst.Index) InnerError!void {
 /// passed as the first parameter.
 /// May leave the return value on the stack.
 fn callIntrinsic(
-    self: *Self,
+    func: *CodeGen,
     name: []const u8,
     param_types: []const Type,
     return_type: Type,
     args: []const WValue,
 ) InnerError!WValue {
     assert(param_types.len == args.len);
-    const symbol_index = self.bin_file.base.getGlobalSymbol(name) catch |err| {
-        return self.fail("Could not find or create global symbol '{s}'", .{@errorName(err)});
+    const symbol_index = func.bin_file.base.getGlobalSymbol(name) catch |err| {
+        return func.fail("Could not find or create global symbol '{s}'", .{@errorName(err)});
     };
 
     // Always pass over C-ABI
-    var func_type = try genFunctype(self.gpa, .C, param_types, return_type, self.target);
-    defer func_type.deinit(self.gpa);
-    const func_type_index = try self.bin_file.putOrGetFuncType(func_type);
-    try self.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
+    var func_type = try genFunctype(func.gpa, .C, param_types, return_type, func.target);
+    defer func_type.deinit(func.gpa);
+    const func_type_index = try func.bin_file.putOrGetFuncType(func_type);
+    try func.bin_file.addOrUpdateImport(name, symbol_index, null, func_type_index);
 
-    const want_sret_param = firstParamSRet(.C, return_type, self.target);
+    const want_sret_param = firstParamSRet(.C, return_type, func.target);
     // if we want return as first param, we allocate a pointer to stack,
     // and emit it as our first argument
     const sret = if (want_sret_param) blk: {
-        const sret_local = try self.allocStack(return_type);
-        try self.lowerToStack(sret_local);
+        const sret_local = try func.allocStack(return_type);
+        try func.lowerToStack(sret_local);
         break :blk sret_local;
     } else WValue{ .none = {} };
 
@@ -5729,16 +5727,16 @@ fn callIntrinsic(
     for (args) |arg, arg_i| {
         assert(!(want_sret_param and arg == .stack));
         assert(param_types[arg_i].hasRuntimeBitsIgnoreComptime());
-        try self.lowerArg(.C, param_types[arg_i], arg);
+        try func.lowerArg(.C, param_types[arg_i], arg);
     }
 
     // Actually call our intrinsic
-    try self.addLabel(.call, symbol_index);
+    try func.addLabel(.call, symbol_index);
 
     if (!return_type.hasRuntimeBitsIgnoreComptime()) {
         return WValue.none;
     } else if (return_type.isNoReturn()) {
-        try self.addTag(.@"unreachable");
+        try func.addTag(.@"unreachable");
         return WValue.none;
     } else if (want_sret_param) {
         return sret;