Commit bf11cdc9d8

Jakub Konka <kubkon@jakubkonka.com>
2022-05-07 11:28:57
x64: refactor code to avoid stage1 sema limitations
1 parent 197c2a4
Changed files (2)
src/arch/x86_64/CodeGen.zig
@@ -191,36 +191,12 @@ pub const MCValue = union(enum) {
         };
     }
 
-    fn usesCompareFlags(mcv: MCValue) bool {
-        return switch (mcv) {
-            .compare_flags_unsigned,
-            .compare_flags_signed,
-            .register_overflow_unsigned,
-            .register_overflow_signed,
-            => true,
-            else => false,
-        };
-    }
-
     fn isRegister(mcv: MCValue) bool {
         return switch (mcv) {
-            .register,
-            .register_overflow_unsigned,
-            .register_overflow_signed,
-            => true,
+            .register => true,
             else => false,
         };
     }
-
-    fn asRegister(mcv: MCValue) ?Register {
-        return switch (mcv) {
-            .register,
-            .register_overflow_unsigned,
-            .register_overflow_signed,
-            => |reg| reg,
-            else => null,
-        };
-    }
 };
 
 const Branch = struct {
@@ -852,15 +828,21 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
         branch.inst_table.putAssumeCapacityNoClobber(inst, result);
 
-        if (result.asRegister()) |reg| {
-            // In some cases (such as bitcast), an operand
-            // may be the same MCValue as the result. If
-            // that operand died and was a register, it
-            // was freed by processDeath. We have to
-            // "re-allocate" the register.
-            if (self.register_manager.isRegFree(reg)) {
-                self.register_manager.getRegAssumeFree(reg, inst);
-            }
+        switch (result) {
+            .register,
+            .register_overflow_signed,
+            .register_overflow_unsigned,
+            => |reg| {
+                // In some cases (such as bitcast), an operand
+                // may be the same MCValue as the result. If
+                // that operand died and was a register, it
+                // was freed by processDeath. We have to
+                // "re-allocate" the register.
+                if (self.register_manager.isRegFree(reg)) {
+                    self.register_manager.getRegAssumeFree(reg, inst);
+                }
+            },
+            else => {},
         }
     }
     self.finishAirBookkeeping();
@@ -948,18 +930,32 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
 pub fn spillCompareFlagsIfOccupied(self: *Self) !void {
     if (self.compare_flags_inst) |inst_to_save| {
         const mcv = self.getResolvedInstValue(inst_to_save);
-        assert(mcv.usesCompareFlags());
+        const new_mcv = switch (mcv) {
+            .register_overflow_signed,
+            .register_overflow_unsigned,
+            => try self.allocRegOrMem(inst_to_save, false),
+            .compare_flags_signed,
+            .compare_flags_unsigned,
+            => try self.allocRegOrMem(inst_to_save, true),
+            else => unreachable,
+        };
 
-        const new_mcv = try self.allocRegOrMem(inst_to_save, !mcv.isRegister());
         try self.setRegOrMem(self.air.typeOfIndex(inst_to_save), new_mcv, mcv);
         log.debug("spilling {d} to mcv {any}", .{ inst_to_save, new_mcv });
+
         const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
         try branch.inst_table.put(self.gpa, inst_to_save, new_mcv);
 
         self.compare_flags_inst = null;
+
         // TODO consolidate with register manager and spillInstruction
         // this call should really belong in the register manager!
-        if (mcv.isRegister()) self.register_manager.freeReg(mcv.asRegister().?);
+        switch (mcv) {
+            .register_overflow_signed,
+            .register_overflow_unsigned,
+            => |reg| self.register_manager.freeReg(reg),
+            else => {},
+        }
     }
 }
 
@@ -1031,7 +1027,7 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
             else => null,
         };
-        defer if (operand_lock) |reg| self.register_manager.unlockReg(reg);
+        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
         const reg = try self.register_manager.allocReg(inst);
         try self.genSetReg(dest_ty, reg, .{ .immediate = 0 });
@@ -1062,7 +1058,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (operand_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
     const reg: Register = blk: {
         if (operand.isRegister()) {
@@ -1150,7 +1146,7 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
             else => null,
         };
-        defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
 
         const lhs_reg = try self.copyToTmpRegister(ty, lhs);
         const lhs_reg_lock = self.register_manager.lockRegAssumeUnused(lhs_reg);
@@ -1161,7 +1157,7 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
             else => null,
         };
-        defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+        defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
         try self.genBinMathOpMir(.cmp, ty, .{ .register = lhs_reg }, rhs_mcv);
 
@@ -1200,9 +1196,9 @@ fn genPtrBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_r
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (offset_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (offset_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const dst_mcv = blk: {
+    const dst_mcv: MCValue = blk: {
         if (self.reuseOperand(inst, op_lhs, 0, ptr)) {
             if (ptr.isMemory() or ptr.isRegister()) break :blk ptr;
         }
@@ -1213,9 +1209,9 @@ fn genPtrBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_r
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (dst_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const offset_mcv = blk: {
+    const offset_mcv: MCValue = blk: {
         if (self.reuseOperand(inst, op_rhs, 1, offset)) {
             if (offset.isRegister()) break :blk offset;
         }
@@ -1226,7 +1222,7 @@ fn genPtrBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_r
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
-    defer if (offset_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (offset_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
     try self.genIntMulComplexOpMir(offset_ty, offset_mcv, .{ .immediate = elem_size });
 
@@ -1315,16 +1311,16 @@ fn genSubOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
 
     const rhs = try self.resolveInst(op_rhs);
     const rhs_lock: ?RegisterLock = switch (rhs) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const dst_mcv = blk: {
+    const dst_mcv: MCValue = blk: {
         if (self.reuseOperand(inst, op_lhs, 0, lhs) and lhs.isRegister()) {
             break :blk lhs;
         }
@@ -1334,9 +1330,9 @@ fn genSubOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
-    defer if (dst_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const rhs_mcv = blk: {
+    const rhs_mcv: MCValue = blk: {
         if (rhs.isMemory() or rhs.isRegister()) break :blk rhs;
         break :blk MCValue{ .register = try self.copyToTmpRegister(dst_ty, rhs) };
     };
@@ -1344,7 +1340,7 @@ fn genSubOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
-    defer if (rhs_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (rhs_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
     try self.genBinMathOpMir(.sub, dst_ty, dst_mcv, rhs_mcv);
 
@@ -1476,9 +1472,13 @@ fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
 fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const result = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ty = self.air.typeOf(bin_op.lhs);
 
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+    }
+
+    const ty = self.air.typeOf(bin_op.lhs);
+    const result: MCValue = result: {
         switch (ty.zigTypeTag()) {
             .Vector => return self.fail("TODO implement mul_with_overflow for Vector type", .{}),
             .Int => {
@@ -1529,7 +1529,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                 .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
                                 else => null,
                             };
-                            defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+                            defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
                             const dst_reg: Register = blk: {
                                 if (lhs.isRegister()) break :blk lhs.register;
@@ -1538,7 +1538,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                             const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
                             defer self.register_manager.unlockReg(dst_reg_lock);
 
-                            const rhs_mcv = blk: {
+                            const rhs_mcv: MCValue = blk: {
                                 if (rhs.isRegister() or rhs.isMemory()) break :blk rhs;
                                 break :blk MCValue{ .register = try self.copyToTmpRegister(ty, rhs) };
                             };
@@ -1546,7 +1546,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
                                 .register => |reg| self.register_manager.lockReg(reg),
                                 else => null,
                             };
-                            defer if (rhs_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+                            defer if (rhs_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
                             try self.genIntMulComplexOpMir(Type.isize, .{ .register = dst_reg }, rhs_mcv);
 
@@ -1734,19 +1734,19 @@ fn genIntMulDivOpMir(
 /// Clobbers .rax and .rdx registers.
 fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCValue {
     const signedness = ty.intInfo(self.target.*).signedness;
-    const dividend = switch (lhs) {
+    const dividend: Register = switch (lhs) {
         .register => |reg| reg,
         else => try self.copyToTmpRegister(ty, lhs),
     };
     const dividend_lock = self.register_manager.lockReg(dividend);
-    defer if (dividend_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dividend_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const divisor = switch (rhs) {
+    const divisor: Register = switch (rhs) {
         .register => |reg| reg,
         else => try self.copyToTmpRegister(ty, rhs),
     };
     const divisor_lock = self.register_manager.lockReg(divisor);
-    defer if (divisor_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (divisor_lock) |lock| self.register_manager.unlockReg(lock);
 
     try self.genIntMulDivOpMir(switch (signedness) {
         .signed => .idiv,
@@ -1791,67 +1791,72 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
 
 fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const tag = self.air.instructions.items(.tag)[inst];
-        const ty = self.air.typeOfIndex(inst);
 
-        if (ty.zigTypeTag() != .Int) {
-            return self.fail("TODO implement {} for operands of dst type {}", .{ tag, ty.zigTypeTag() });
-        }
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+    }
 
-        if (tag == .div_float) {
-            return self.fail("TODO implement {}", .{tag});
+    const tag = self.air.instructions.items(.tag)[inst];
+    const ty = self.air.typeOfIndex(inst);
+
+    if (ty.zigTypeTag() != .Int) {
+        return self.fail("TODO implement {} for operands of dst type {}", .{ tag, ty.zigTypeTag() });
+    }
+
+    if (tag == .div_float) {
+        return self.fail("TODO implement {}", .{tag});
+    }
+
+    const signedness = ty.intInfo(self.target.*).signedness;
+
+    // Spill .rax and .rdx upfront to ensure we don't spill the operands too late.
+    const track_rax: ?Air.Inst.Index = blk: {
+        if (signedness == .unsigned) break :blk inst;
+        switch (tag) {
+            .div_exact, .div_trunc => break :blk inst,
+            else => break :blk null,
         }
+    };
+    try self.register_manager.getReg(.rax, track_rax);
+    try self.register_manager.getReg(.rdx, null);
+    var reg_locks: [2]RegisterLock = undefined;
+    self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }, &reg_locks);
+    defer for (reg_locks) |reg| {
+        self.register_manager.unlockReg(reg);
+    };
 
-        const signedness = ty.intInfo(self.target.*).signedness;
+    const lhs = try self.resolveInst(bin_op.lhs);
+    const lhs_lock: ?RegisterLock = switch (lhs) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
 
-        // Spill .rax and .rdx upfront to ensure we don't spill the operands too late.
-        const track_rax: ?Air.Inst.Index = blk: {
-            if (signedness == .unsigned) break :blk inst;
+    const rhs: MCValue = blk: {
+        const rhs = try self.resolveInst(bin_op.rhs);
+        if (signedness == .signed) {
             switch (tag) {
-                .div_exact, .div_trunc => break :blk inst,
-                else => break :blk null,
-            }
-        };
-        try self.register_manager.getReg(.rax, track_rax);
-        try self.register_manager.getReg(.rdx, null);
-        var reg_locks: [2]RegisterLock = undefined;
-        self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }, &reg_locks);
-        defer for (reg_locks) |reg| {
-            self.register_manager.unlockReg(reg);
-        };
-
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const lhs_lock: ?RegisterLock = switch (lhs) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
-
-        const rhs = blk: {
-            const rhs = try self.resolveInst(bin_op.rhs);
-            if (signedness == .signed) {
-                switch (tag) {
-                    .div_floor => {
-                        const rhs_lock: ?RegisterLock = switch (rhs) {
-                            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-                            else => null,
-                        };
-                        defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+                .div_floor => {
+                    const rhs_lock: ?RegisterLock = switch (rhs) {
+                        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+                        else => null,
+                    };
+                    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
-                        break :blk try self.copyToRegisterWithInstTracking(inst, ty, rhs);
-                    },
-                    else => {},
-                }
+                    break :blk try self.copyToRegisterWithInstTracking(inst, ty, rhs);
+                },
+                else => {},
             }
-            break :blk rhs;
-        };
-        const rhs_lock: ?RegisterLock = switch (rhs) {
-            .register => |reg| self.register_manager.lockReg(reg),
-            else => null,
-        };
-        defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+        }
+        break :blk rhs;
+    };
+    const rhs_lock: ?RegisterLock = switch (rhs) {
+        .register => |reg| self.register_manager.lockReg(reg),
+        else => null,
+    };
+    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
+    const result: MCValue = result: {
         if (signedness == .unsigned) {
             try self.genIntMulDivOpMir(.div, ty, signedness, lhs, rhs);
             break :result MCValue{ .register = .rax };
@@ -1871,59 +1876,69 @@ fn airDiv(self: *Self, inst: Air.Inst.Index) !void {
             else => unreachable,
         }
     };
+
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airRem(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ty = self.air.typeOfIndex(inst);
-        if (ty.zigTypeTag() != .Int) {
-            return self.fail("TODO implement .rem for operands of dst type {}", .{ty.zigTypeTag()});
-        }
-        // Spill .rax and .rdx upfront to ensure we don't spill the operands too late.
-        try self.register_manager.getReg(.rax, null);
-        try self.register_manager.getReg(.rdx, inst);
-        var reg_locks: [2]RegisterLock = undefined;
-        self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }, &reg_locks);
-        defer for (reg_locks) |reg| {
-            self.register_manager.unlockReg(reg);
-        };
-
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
 
-        const signedness = ty.intInfo(self.target.*).signedness;
-        try self.genIntMulDivOpMir(switch (signedness) {
-            .signed => .idiv,
-            .unsigned => .div,
-        }, ty, signedness, lhs, rhs);
-        break :result MCValue{ .register = .rdx };
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+    }
+    const ty = self.air.typeOfIndex(inst);
+    if (ty.zigTypeTag() != .Int) {
+        return self.fail("TODO implement .rem for operands of dst type {}", .{ty.zigTypeTag()});
+    }
+    // Spill .rax and .rdx upfront to ensure we don't spill the operands too late.
+    try self.register_manager.getReg(.rax, null);
+    try self.register_manager.getReg(.rdx, inst);
+    var reg_locks: [2]RegisterLock = undefined;
+    self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }, &reg_locks);
+    defer for (reg_locks) |reg| {
+        self.register_manager.unlockReg(reg);
     };
+
+    const lhs = try self.resolveInst(bin_op.lhs);
+    const rhs = try self.resolveInst(bin_op.rhs);
+
+    const signedness = ty.intInfo(self.target.*).signedness;
+    try self.genIntMulDivOpMir(switch (signedness) {
+        .signed => .idiv,
+        .unsigned => .div,
+    }, ty, signedness, lhs, rhs);
+
+    const result: MCValue = .{ .register = .rdx };
+
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airMod(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ty = self.air.typeOfIndex(inst);
-        if (ty.zigTypeTag() != .Int) {
-            return self.fail("TODO implement .mod for operands of dst type {}", .{ty.zigTypeTag()});
-        }
-        const signedness = ty.intInfo(self.target.*).signedness;
 
-        // Spill .rax and .rdx upfront to ensure we don't spill the operands too late.
-        try self.register_manager.getReg(.rax, null);
-        try self.register_manager.getReg(.rdx, if (signedness == .unsigned) inst else null);
-        var reg_locks: [2]RegisterLock = undefined;
-        self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }, &reg_locks);
-        defer for (reg_locks) |reg| {
-            self.register_manager.unlockReg(reg);
-        };
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+    }
 
-        const lhs = try self.resolveInst(bin_op.lhs);
-        const rhs = try self.resolveInst(bin_op.rhs);
+    const ty = self.air.typeOfIndex(inst);
+    if (ty.zigTypeTag() != .Int) {
+        return self.fail("TODO implement .mod for operands of dst type {}", .{ty.zigTypeTag()});
+    }
+    const signedness = ty.intInfo(self.target.*).signedness;
 
+    // Spill .rax and .rdx upfront to ensure we don't spill the operands too late.
+    try self.register_manager.getReg(.rax, null);
+    try self.register_manager.getReg(.rdx, if (signedness == .unsigned) inst else null);
+    var reg_locks: [2]RegisterLock = undefined;
+    self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rdx }, &reg_locks);
+    defer for (reg_locks) |reg| {
+        self.register_manager.unlockReg(reg);
+    };
+
+    const lhs = try self.resolveInst(bin_op.lhs);
+    const rhs = try self.resolveInst(bin_op.rhs);
+
+    const result: MCValue = result: {
         switch (signedness) {
             .unsigned => {
                 try self.genIntMulDivOpMir(switch (signedness) {
@@ -1943,6 +1958,7 @@ fn airMod(self: *Self, inst: Air.Inst.Index) !void {
             },
         }
     };
+
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
@@ -2017,7 +2033,7 @@ fn airShl(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (value_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (value_lock) |lock| self.register_manager.unlockReg(lock);
 
     const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ty, value);
     _ = try self.addInst(.{
@@ -2117,7 +2133,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (operand_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
     const result: MCValue = result: {
         if (!payload_ty.hasRuntimeBits()) break :result operand;
@@ -2150,7 +2166,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
             else => null,
         };
-        defer if (operand_lock) |reg| self.register_manager.unlockReg(reg);
+        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
         const abi_align = err_union_ty.abiAlignment(self.target.*);
         const err_ty = err_union_ty.errorUnionSet();
@@ -2222,7 +2238,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
             else => null,
         };
-        defer if (operand_lock) |reg| self.register_manager.unlockReg(reg);
+        defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
         if (optional_ty.isPtrLikeOptional()) {
             // TODO should we check if we can reuse the operand?
@@ -2359,7 +2375,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (slice_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (slice_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
     const elem_ty = slice_ty.childType();
     const elem_size = elem_ty.abiSize(self.target.*);
@@ -2372,7 +2388,7 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (index_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (index_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
     const offset_reg = try self.elemOffset(index_ty, index_mcv, elem_size);
     const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
@@ -2429,110 +2445,119 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const array_ty = self.air.typeOf(bin_op.lhs);
-        const array = try self.resolveInst(bin_op.lhs);
-        const array_lock: ?RegisterLock = switch (array) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (array_lock) |reg| self.register_manager.unlockReg(reg);
 
-        const elem_ty = array_ty.childType();
-        const elem_abi_size = elem_ty.abiSize(self.target.*);
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+    }
 
-        const index_ty = self.air.typeOf(bin_op.rhs);
-        const index = try self.resolveInst(bin_op.rhs);
-        const index_lock: ?RegisterLock = switch (index) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
+    const array_ty = self.air.typeOf(bin_op.lhs);
+    const array = try self.resolveInst(bin_op.lhs);
+    const array_lock: ?RegisterLock = switch (array) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (array_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
-        const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
-        defer self.register_manager.unlockReg(offset_reg_lock);
+    const elem_ty = array_ty.childType();
+    const elem_abi_size = elem_ty.abiSize(self.target.*);
 
-        const addr_reg = try self.register_manager.allocReg(null);
-        switch (array) {
-            .register => {
-                const off = @intCast(i32, try self.allocMem(
-                    inst,
-                    @intCast(u32, array_ty.abiSize(self.target.*)),
-                    array_ty.abiAlignment(self.target.*),
-                ));
-                try self.genSetStack(array_ty, off, array, .{});
-                // lea reg, [rbp]
-                _ = try self.addInst(.{
-                    .tag = .lea,
-                    .ops = (Mir.Ops{
-                        .reg1 = addr_reg.to64(),
-                        .reg2 = .rbp,
-                    }).encode(),
-                    .data = .{ .imm = @bitCast(u32, -off) },
-                });
-            },
-            .stack_offset => |off| {
-                // lea reg, [rbp]
-                _ = try self.addInst(.{
-                    .tag = .lea,
-                    .ops = (Mir.Ops{
-                        .reg1 = addr_reg.to64(),
-                        .reg2 = .rbp,
-                    }).encode(),
-                    .data = .{ .imm = @bitCast(u32, -off) },
-                });
-            },
-            .memory,
-            .got_load,
-            .direct_load,
-            => {
-                try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
-            },
-            else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
-        }
-
-        // TODO we could allocate register here, but need to expect addr register and potentially
-        // offset register.
-        const dst_mcv = try self.allocRegOrMem(inst, false);
-        try self.genBinMathOpMir(.add, Type.usize, .{ .register = addr_reg }, .{ .register = offset_reg });
-        try self.load(dst_mcv, .{ .register = addr_reg.to64() }, array_ty);
-        break :result dst_mcv;
+    const index_ty = self.air.typeOf(bin_op.rhs);
+    const index = try self.resolveInst(bin_op.rhs);
+    const index_lock: ?RegisterLock = switch (index) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
     };
-    return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+    defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
+    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+    defer self.register_manager.unlockReg(offset_reg_lock);
+
+    const addr_reg = try self.register_manager.allocReg(null);
+    switch (array) {
+        .register => {
+            const off = @intCast(i32, try self.allocMem(
+                inst,
+                @intCast(u32, array_ty.abiSize(self.target.*)),
+                array_ty.abiAlignment(self.target.*),
+            ));
+            try self.genSetStack(array_ty, off, array, .{});
+            // lea reg, [rbp]
+            _ = try self.addInst(.{
+                .tag = .lea,
+                .ops = (Mir.Ops{
+                    .reg1 = addr_reg.to64(),
+                    .reg2 = .rbp,
+                }).encode(),
+                .data = .{ .imm = @bitCast(u32, -off) },
+            });
+        },
+        .stack_offset => |off| {
+            // lea reg, [rbp]
+            _ = try self.addInst(.{
+                .tag = .lea,
+                .ops = (Mir.Ops{
+                    .reg1 = addr_reg.to64(),
+                    .reg2 = .rbp,
+                }).encode(),
+                .data = .{ .imm = @bitCast(u32, -off) },
+            });
+        },
+        .memory,
+        .got_load,
+        .direct_load,
+        => {
+            try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
+        },
+        else => return self.fail("TODO implement array_elem_val when array is {}", .{array}),
+    }
+
+    // TODO we could allocate register here, but need to expect addr register and potentially
+    // offset register.
+    const dst_mcv = try self.allocRegOrMem(inst, false);
+    try self.genBinMathOpMir(.add, Type.usize, .{ .register = addr_reg }, .{ .register = offset_reg });
+    try self.load(dst_mcv, .{ .register = addr_reg.to64() }, array_ty);
+
+    return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
     const is_volatile = false; // TODO
     const bin_op = self.air.instructions.items(.data)[inst].bin_op;
-    const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else result: {
-        // this is identical to the `airPtrElemPtr` codegen expect here an
-        // additional `mov` is needed at the end to get the actual value
 
-        const ptr_ty = self.air.typeOf(bin_op.lhs);
-        const ptr = try self.resolveInst(bin_op.lhs);
-        const ptr_lock: ?RegisterLock = switch (ptr) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    if (!is_volatile and self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
+    }
 
-        const elem_ty = ptr_ty.elemType2();
-        const elem_abi_size = elem_ty.abiSize(self.target.*);
-        const index_ty = self.air.typeOf(bin_op.rhs);
-        const index = try self.resolveInst(bin_op.rhs);
-        const index_lock: ?RegisterLock = switch (index) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
+    // this is identical to the `airPtrElemPtr` codegen except here an
+    // additional `mov` is needed at the end to get the actual value
 
-        const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
-        const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
-        defer self.register_manager.unlockReg(offset_reg_lock);
+    const ptr_ty = self.air.typeOf(bin_op.lhs);
+    const ptr = try self.resolveInst(bin_op.lhs);
+    const ptr_lock: ?RegisterLock = switch (ptr) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
-        try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
+    const elem_ty = ptr_ty.elemType2();
+    const elem_abi_size = elem_ty.abiSize(self.target.*);
+    const index_ty = self.air.typeOf(bin_op.rhs);
+    const index = try self.resolveInst(bin_op.rhs);
+    const index_lock: ?RegisterLock = switch (index) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
+    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+    defer self.register_manager.unlockReg(offset_reg_lock);
+
+    const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
+    try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
+
+    const result: MCValue = result: {
         if (elem_abi_size > 8) {
             return self.fail("TODO copy value with size {} from pointer", .{elem_abi_size});
         } else {
@@ -2549,40 +2574,44 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
             break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) };
         }
     };
+
     return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
 }
 
 fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void {
     const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
     const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const ptr_ty = self.air.typeOf(extra.lhs);
-        const ptr = try self.resolveInst(extra.lhs);
-        const ptr_lock: ?RegisterLock = switch (ptr) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (ptr_lock) |reg| self.register_manager.unlockReg(reg);
 
-        const elem_ty = ptr_ty.elemType2();
-        const elem_abi_size = elem_ty.abiSize(self.target.*);
-        const index_ty = self.air.typeOf(extra.rhs);
-        const index = try self.resolveInst(extra.rhs);
-        const index_lock: ?RegisterLock = switch (index) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (index_lock) |reg| self.register_manager.unlockReg(reg);
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none });
+    }
 
-        const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
-        const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
-        defer self.register_manager.unlockReg(offset_reg_lock);
+    const ptr_ty = self.air.typeOf(extra.lhs);
+    const ptr = try self.resolveInst(extra.lhs);
+    const ptr_lock: ?RegisterLock = switch (ptr) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
-        const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
-        try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
-        break :result dst_mcv;
+    const elem_ty = ptr_ty.elemType2();
+    const elem_abi_size = elem_ty.abiSize(self.target.*);
+    const index_ty = self.air.typeOf(extra.rhs);
+    const index = try self.resolveInst(extra.rhs);
+    const index_lock: ?RegisterLock = switch (index) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
     };
-    return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
+    defer if (index_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const offset_reg = try self.elemOffset(index_ty, index, elem_abi_size);
+    const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg);
+    defer self.register_manager.unlockReg(offset_reg_lock);
+
+    const dst_mcv = try self.copyToRegisterWithInstTracking(inst, ptr_ty, ptr);
+    try self.genBinMathOpMir(.add, ptr_ty, dst_mcv, .{ .register = offset_reg });
+
+    return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
 }
 
 fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
@@ -2601,14 +2630,14 @@ fn airSetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
     const tag = try self.resolveInst(bin_op.rhs);
     const tag_lock: ?RegisterLock = switch (tag) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (tag_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (tag_lock) |lock| self.register_manager.unlockReg(lock);
 
     const adjusted_ptr: MCValue = if (layout.payload_size > 0 and layout.tag_align < layout.payload_align) blk: {
         // TODO reusing the operand
@@ -2642,7 +2671,7 @@ fn airGetUnionTag(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (operand_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
 
     const tag_abi_size = tag_ty.abiSize(self.target.*);
     const dst_mcv: MCValue = blk: {
@@ -2790,7 +2819,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
         },
         .register => |reg| {
             const reg_lock = self.register_manager.lockReg(reg);
-            defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
 
             switch (dst_mcv) {
                 .dead => unreachable,
@@ -2916,7 +2945,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
         },
         .register => |reg| {
             const reg_lock = self.register_manager.lockReg(reg);
-            defer if (reg_lock) |locked_reg| self.register_manager.unlockReg(locked_reg);
+            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
 
             switch (value) {
                 .none => unreachable,
@@ -3010,7 +3039,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
                 .register => |reg| self.register_manager.lockReg(reg),
                 else => null,
             };
-            defer if (value_lock) |reg| self.register_manager.unlockReg(reg);
+            defer if (value_lock) |lock| self.register_manager.unlockReg(lock);
 
             const addr_reg = try self.register_manager.allocReg(null);
             const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
@@ -3198,7 +3227,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
                 defer self.register_manager.unlockReg(offset_reg_lock);
 
                 const can_reuse_operand = self.reuseOperand(inst, operand, 0, mcv);
-                const result_reg = blk: {
+                const result_reg: Register = blk: {
                     if (can_reuse_operand) {
                         break :blk reg;
                     } else {
@@ -3208,7 +3237,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
                     }
                 };
                 const result_reg_lock = self.register_manager.lockReg(result_reg);
-                defer if (result_reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
+                defer if (result_reg_lock) |lock| self.register_manager.unlockReg(lock);
 
                 try self.genBinMathOpMir(.add, ptr_ty, .{ .register = result_reg }, .{ .register = offset_reg });
                 break :result MCValue{ .register = result_reg };
@@ -3224,12 +3253,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
     const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
     const operand = extra.struct_operand;
     const index = extra.field_index;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const mcv = try self.resolveInst(operand);
-        const struct_ty = self.air.typeOf(operand);
-        const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*);
-        const struct_field_ty = struct_ty.structFieldType(index);
 
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ extra.struct_operand, .none, .none });
+    }
+
+    const mcv = try self.resolveInst(operand);
+    const struct_ty = self.air.typeOf(operand);
+    const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*);
+    const struct_field_ty = struct_ty.structFieldType(index);
+
+    const result: MCValue = result: {
         switch (mcv) {
             .stack_offset => |off| {
                 const stack_offset = off - @intCast(i32, struct_field_offset);
@@ -3239,7 +3273,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                 const reg_lock = self.register_manager.lockRegAssumeUnused(reg);
                 defer self.register_manager.unlockReg(reg_lock);
 
-                const dst_mcv = blk: {
+                const dst_mcv: MCValue = blk: {
                     if (self.reuseOperand(inst, operand, 0, mcv)) {
                         break :blk mcv;
                     } else {
@@ -3250,10 +3284,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
                     }
                 };
                 const dst_mcv_lock: ?RegisterLock = switch (dst_mcv) {
-                    .register => |reg| self.register_manager.lockReg(reg),
+                    .register => |a_reg| self.register_manager.lockReg(a_reg),
                     else => null,
                 };
-                defer if (dst_mcv_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
+                defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
                 // Shift by struct_field_offset.
                 const shift = @intCast(u8, struct_field_offset * @sizeOf(usize));
@@ -3342,17 +3376,17 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
 
     const rhs = try self.resolveInst(op_rhs);
     const rhs_lock: ?RegisterLock = switch (rhs) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (rhs_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
 
     var flipped: bool = false;
-    const dst_mcv = blk: {
+    const dst_mcv: MCValue = blk: {
         if (self.reuseOperand(inst, op_lhs, 0, lhs) and lhs.isRegister()) {
             break :blk lhs;
         }
@@ -3366,9 +3400,9 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
-    defer if (dst_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dst_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
-    const src_mcv = blk: {
+    const src_mcv: MCValue = blk: {
         const mcv = if (flipped) lhs else rhs;
         if (mcv.isRegister() or mcv.isMemory()) break :blk mcv;
         break :blk MCValue{ .register = try self.copyToTmpRegister(dst_ty, mcv) };
@@ -3377,7 +3411,7 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs:
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
-    defer if (src_mcv_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (src_mcv_lock) |lock| self.register_manager.unlockReg(lock);
 
     const tag = self.air.instructions.items(.tag)[inst];
     switch (tag) {
@@ -3409,7 +3443,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
                 .register_overflow_signed => unreachable,
                 .ptr_stack_offset => {
                     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
-                    defer if (dst_reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
+                    defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
 
                     const reg = try self.copyToTmpRegister(dst_ty, src_mcv);
                     return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg });
@@ -3441,7 +3475,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC
                 => {
                     assert(abi_size <= 8);
                     const dst_reg_lock = self.register_manager.lockReg(dst_reg);
-                    defer if (dst_reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
+                    defer if (dst_reg_lock) |lock| self.register_manager.unlockReg(lock);
 
                     const reg = try self.copyToTmpRegister(dst_ty, src_mcv);
                     return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg });
@@ -3782,22 +3816,25 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
         try self.register_manager.getReg(reg, null);
     }
 
-    const rdi_lock: ?RegisterLock = if (info.return_value == .stack_offset) blk: {
-        const ret_ty = fn_ty.fnReturnType();
-        const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
-        const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
-        const stack_offset = @intCast(i32, try self.allocMem(inst, ret_abi_size, ret_abi_align));
-        log.debug("airCall: return value on stack at offset {}", .{stack_offset});
+    const rdi_lock: ?RegisterLock = blk: {
+        if (info.return_value == .stack_offset) {
+            const ret_ty = fn_ty.fnReturnType();
+            const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*));
+            const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*));
+            const stack_offset = @intCast(i32, try self.allocMem(inst, ret_abi_size, ret_abi_align));
+            log.debug("airCall: return value on stack at offset {}", .{stack_offset});
 
-        try self.register_manager.getReg(.rdi, null);
-        try self.genSetReg(Type.usize, .rdi, .{ .ptr_stack_offset = stack_offset });
-        const rdi_lock = self.register_manager.lockRegAssumeUnused(.rdi);
+            try self.register_manager.getReg(.rdi, null);
+            try self.genSetReg(Type.usize, .rdi, .{ .ptr_stack_offset = stack_offset });
+            const rdi_lock = self.register_manager.lockRegAssumeUnused(.rdi);
 
-        info.return_value.stack_offset = stack_offset;
+            info.return_value.stack_offset = stack_offset;
 
-        break :blk rdi_lock;
-    } else null;
-    defer if (rdi_lock) |reg| self.register_manager.unlockReg(reg);
+            break :blk rdi_lock;
+        }
+        break :blk null;
+    };
+    defer if (rdi_lock) |lock| self.register_manager.unlockReg(lock);
 
     for (args) |arg, arg_i| {
         const mc_arg = info.args[arg_i];
@@ -4107,7 +4144,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
             .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
             else => null,
         };
-        defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg);
+        defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
 
         const dst_reg = try self.copyToTmpRegister(ty, lhs);
         const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
@@ -4572,27 +4609,31 @@ fn airIsNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const operand_ptr = try self.resolveInst(un_op);
 
-        const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (operand_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+    }
 
-        const operand: MCValue = blk: {
-            if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
-                // The MCValue that holds the pointer can be re-used as the value.
-                break :blk operand_ptr;
-            } else {
-                break :blk try self.allocRegOrMem(inst, true);
-            }
-        };
-        const ptr_ty = self.air.typeOf(un_op);
-        try self.load(operand, operand_ptr, ptr_ty);
-        break :result try self.isNull(inst, ptr_ty.elemType(), operand);
+    const operand_ptr = try self.resolveInst(un_op);
+    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const operand: MCValue = blk: {
+        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
+            // The MCValue that holds the pointer can be re-used as the value.
+            break :blk operand_ptr;
+        } else {
+            break :blk try self.allocRegOrMem(inst, true);
+        }
     };
+    const ptr_ty = self.air.typeOf(un_op);
+    try self.load(operand, operand_ptr, ptr_ty);
+
+    const result = try self.isNull(inst, ptr_ty.elemType(), operand);
+
     return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
@@ -4608,27 +4649,31 @@ fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const operand_ptr = try self.resolveInst(un_op);
 
-        const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (operand_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+    }
 
-        const operand: MCValue = blk: {
-            if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
-                // The MCValue that holds the pointer can be re-used as the value.
-                break :blk operand_ptr;
-            } else {
-                break :blk try self.allocRegOrMem(inst, true);
-            }
-        };
-        const ptr_ty = self.air.typeOf(un_op);
-        try self.load(operand, operand_ptr, ptr_ty);
-        break :result try self.isNonNull(inst, ptr_ty.elemType(), operand);
+    const operand_ptr = try self.resolveInst(un_op);
+    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
     };
+    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const operand: MCValue = blk: {
+        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
+            // The MCValue that holds the pointer can be re-used as the value.
+            break :blk operand_ptr;
+        } else {
+            break :blk try self.allocRegOrMem(inst, true);
+        }
+    };
+    const ptr_ty = self.air.typeOf(un_op);
+    try self.load(operand, operand_ptr, ptr_ty);
+
+    const result = try self.isNonNull(inst, ptr_ty.elemType(), operand);
+
     return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
@@ -4644,27 +4689,31 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const operand_ptr = try self.resolveInst(un_op);
 
-        const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (operand_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+    }
 
-        const operand: MCValue = blk: {
-            if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
-                // The MCValue that holds the pointer can be re-used as the value.
-                break :blk operand_ptr;
-            } else {
-                break :blk try self.allocRegOrMem(inst, true);
-            }
-        };
-        const ptr_ty = self.air.typeOf(un_op);
-        try self.load(operand, operand_ptr, ptr_ty);
-        break :result try self.isErr(inst, ptr_ty.elemType(), operand);
+    const operand_ptr = try self.resolveInst(un_op);
+    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const operand: MCValue = blk: {
+        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
+            // The MCValue that holds the pointer can be re-used as the value.
+            break :blk operand_ptr;
+        } else {
+            break :blk try self.allocRegOrMem(inst, true);
+        }
     };
+    const ptr_ty = self.air.typeOf(un_op);
+    try self.load(operand, operand_ptr, ptr_ty);
+
+    const result = try self.isErr(inst, ptr_ty.elemType(), operand);
+
     return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
@@ -4680,27 +4729,31 @@ fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void {
 
 fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
     const un_op = self.air.instructions.items(.data)[inst].un_op;
-    const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
-        const operand_ptr = try self.resolveInst(un_op);
 
-        const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
-            .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
-            else => null,
-        };
-        defer if (operand_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    if (self.liveness.isUnused(inst)) {
+        return self.finishAir(inst, .dead, .{ un_op, .none, .none });
+    }
 
-        const operand: MCValue = blk: {
-            if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
-                // The MCValue that holds the pointer can be re-used as the value.
-                break :blk operand_ptr;
-            } else {
-                break :blk try self.allocRegOrMem(inst, true);
-            }
-        };
-        const ptr_ty = self.air.typeOf(un_op);
-        try self.load(operand, operand_ptr, ptr_ty);
-        break :result try self.isNonErr(inst, ptr_ty.elemType(), operand);
+    const operand_ptr = try self.resolveInst(un_op);
+    const operand_ptr_lock: ?RegisterLock = switch (operand_ptr) {
+        .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+        else => null,
+    };
+    defer if (operand_ptr_lock) |lock| self.register_manager.unlockReg(lock);
+
+    const operand: MCValue = blk: {
+        if (self.reuseOperand(inst, un_op, 0, operand_ptr)) {
+            // The MCValue that holds the pointer can be re-used as the value.
+            break :blk operand_ptr;
+        } else {
+            break :blk try self.allocRegOrMem(inst, true);
+        }
     };
+    const ptr_ty = self.air.typeOf(un_op);
+    try self.load(operand, operand_ptr, ptr_ty);
+
+    const result = try self.isNonErr(inst, ptr_ty.elemType(), operand);
+
     return self.finishAir(inst, result, .{ un_op, .none, .none });
 }
 
@@ -4757,7 +4810,7 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
             try self.spillCompareFlagsIfOccupied();
 
             const cond_reg_lock = self.register_manager.lockReg(cond_reg);
-            defer if (cond_reg_lock) |reg| self.register_manager.unlockReg(reg);
+            defer if (cond_reg_lock) |lock| self.register_manager.unlockReg(lock);
 
             switch (case) {
                 .none => unreachable,
@@ -5305,7 +5358,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
         .register_overflow_signed,
         => |reg| {
             const reg_lock = self.register_manager.lockReg(reg);
-            defer if (reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
+            defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
 
             const wrapped_ty = ty.structFieldType(0);
             try self.genSetStack(wrapped_ty, stack_offset, .{ .register = reg }, .{});
@@ -5407,7 +5460,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
             const base_reg = opts.dest_stack_base orelse .rbp;
             if (!math.isPowerOfTwo(abi_size)) {
                 const reg_lock = self.register_manager.lockReg(reg);
-                defer if (reg_lock) |reg_locked| self.register_manager.unlockReg(reg_locked);
+                defer if (reg_lock) |lock| self.register_manager.unlockReg(lock);
 
                 const tmp_reg = try self.copyToTmpRegister(ty, mcv);
 
@@ -5501,8 +5554,8 @@ fn genInlineMemcpy(
 
     var reg_locks: [2]RegisterLock = undefined;
     self.register_manager.lockRegsAssumeUnused(2, .{ .rax, .rcx }, &reg_locks);
-    defer for (reg_locks) |reg| {
-        self.register_manager.unlockReg(reg);
+    defer for (reg_locks) |lock| {
+        self.register_manager.unlockReg(lock);
     };
 
     const ssbase_lock: ?RegisterLock = if (opts.source_stack_base) |reg|
@@ -5515,7 +5568,7 @@ fn genInlineMemcpy(
         self.register_manager.lockReg(reg)
     else
         null;
-    defer if (dsbase_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dsbase_lock) |lock| self.register_manager.unlockReg(lock);
 
     const dst_addr_reg = try self.register_manager.allocReg(null);
     switch (dst_ptr) {
@@ -6174,21 +6227,21 @@ fn airMemset(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (dst_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
     const src_val = try self.resolveInst(extra.lhs);
     const src_val_lock: ?RegisterLock = switch (src_val) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (src_val_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (src_val_lock) |lock| self.register_manager.unlockReg(lock);
 
     const len = try self.resolveInst(extra.rhs);
     const len_lock: ?RegisterLock = switch (len) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (len_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (len_lock) |lock| self.register_manager.unlockReg(lock);
 
     try self.genInlineMemset(dst_ptr, src_val, len, .{});
 
@@ -6204,7 +6257,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (dst_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (dst_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
     const src_ty = self.air.typeOf(extra.lhs);
     const src_ptr = try self.resolveInst(extra.lhs);
@@ -6212,14 +6265,14 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (src_ptr_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
 
     const len = try self.resolveInst(extra.rhs);
     const len_lock: ?RegisterLock = switch (len) {
         .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
         else => null,
     };
-    defer if (len_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (len_lock) |lock| self.register_manager.unlockReg(lock);
 
     // TODO Is this the only condition for pointer dereference for memcpy?
     const src: MCValue = blk: {
@@ -6245,7 +6298,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
         .register => |reg| self.register_manager.lockReg(reg),
         else => null,
     };
-    defer if (src_lock) |reg| self.register_manager.unlockReg(reg);
+    defer if (src_lock) |lock| self.register_manager.unlockReg(lock);
 
     try self.genInlineMemcpy(dst_ptr, src, len, .{});
 
src/register_manager.zig
@@ -153,7 +153,7 @@ pub fn RegisterManager(
             regs: [count]Register,
             buf: *[count]RegisterLock,
         ) void {
-            for (&regs) |reg, i| {
+            for (regs) |reg, i| {
                 buf[i] = self.lockRegAssumeUnused(reg);
             }
         }