Commit ef7080aed1

Andrew Kelley <andrew@ziglang.org>
2021-07-12 01:32:11
stage2: update Liveness, SPIR-V for new AIR memory layout
also do the inline assembly instruction
1 parent 9918a5f
src/codegen/spirv.zig
@@ -18,14 +18,14 @@ pub const Word = u32;
 pub const ResultId = u32;
 
 pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage);
-pub const InstMap = std.AutoHashMap(*Inst, ResultId);
+pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId);
 
 const IncomingBlock = struct {
     src_label_id: ResultId,
     break_value_id: ResultId,
 };
 
-pub const BlockMap = std.AutoHashMap(*Inst.Block, struct {
+pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct {
     label_id: ResultId,
     incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock),
 });
@@ -279,16 +279,17 @@ pub const DeclGen = struct {
         return self.spv.module.getTarget();
     }
 
-    fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error {
+    fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error {
         @setCold(true);
+        const src: LazySrcLoc = .{ .node_offset = 0 };
         const src_loc = src.toSrcLocWithDecl(self.decl);
         self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args);
         return error.AnalysisFail;
     }
 
-    fn resolve(self: *DeclGen, inst: *Inst) !ResultId {
-        if (inst.value()) |val| {
-            return self.genConstant(inst.src, inst.ty, val);
-        }
+    fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+        if (self.air.value(inst)) |val| {
+            return self.genConstant(self.air.getType(inst), val);
+        }
 
         return self.inst_results.get(inst).?; // Instruction does not dominate all uses!
@@ -313,7 +314,7 @@ pub const DeclGen = struct {
         const target = self.getTarget();
 
         // The backend will never be asked to compile a 0-bit integer, so we won't have to handle those in this function.
-        std.debug.assert(bits != 0);
+        assert(bits != 0);
 
         // 8, 16 and 64-bit integers require the Int8, Int16 and Int64 capabilities respectively.
         // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
@@ -387,19 +388,19 @@ pub const DeclGen = struct {
                     .composite_integer };
             },
             // As of yet, there is no vector support in the self-hosted compiler.
-            .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
+            .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}),
             // TODO: For which types is this the case?
-            else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
+            else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}),
         };
     }
 
     /// Generate a constant representing `val`.
     /// TODO: Deduplication?
-    fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId {
+    fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId {
         const target = self.getTarget();
         const code = &self.spv.binary.types_globals_constants;
         const result_id = self.spv.allocResultId();
-        const result_type_id = try self.genType(src, ty);
+        const result_type_id = try self.genType(ty);
 
         if (val.isUndef()) {
             try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id });
@@ -411,13 +412,13 @@ pub const DeclGen = struct {
                 const int_info = ty.intInfo(target);
                 const backing_bits = self.backingIntBits(int_info.bits) orelse {
                     // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
-                    return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
+                    return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty});
                 };
 
                 // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any
                 // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this
                 // might need to be updated.
-                std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
+                assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64));
                 var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt();
 
                 // Mask the low bits which make up the actual integer. This is to make sure that negative values
@@ -469,13 +470,13 @@ pub const DeclGen = struct {
                 }
             },
             .Void => unreachable,
-            else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}),
+            else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}),
         }
 
         return result_id;
     }
 
-    fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId {
+    fn genType(self: *DeclGen, ty: Type) Error!ResultId {
         // We can't use getOrPut here so we can recursively generate types.
         if (self.spv.types.get(ty)) |already_generated| {
             return already_generated;
@@ -492,7 +493,7 @@ pub const DeclGen = struct {
                 const int_info = ty.intInfo(target);
                 const backing_bits = self.backingIntBits(int_info.bits) orelse {
                     // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits.
-                    return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty});
+                    return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty});
                 };
 
                 // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here.
@@ -518,7 +519,7 @@ pub const DeclGen = struct {
                 };
 
                 if (!supported) {
-                    return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
+                    return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
                 }
 
                 try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits });
@@ -526,19 +527,19 @@ pub const DeclGen = struct {
             .Fn => {
                 // We only support zig-calling-convention functions, no varargs.
                 if (ty.fnCallingConvention() != .Unspecified)
-                    return self.fail(src, "Unsupported calling convention for SPIR-V", .{});
+                    return self.fail("Unsupported calling convention for SPIR-V", .{});
                 if (ty.fnIsVarArgs())
-                    return self.fail(src, "VarArgs unsupported for SPIR-V", .{});
+                    return self.fail("VarArgs unsupported for SPIR-V", .{});
 
                 // In order to avoid a temporary here, first generate all the required types and then simply look them up
                 // when generating the function type.
                 const params = ty.fnParamLen();
                 var i: usize = 0;
                 while (i < params) : (i += 1) {
-                    _ = try self.genType(src, ty.fnParamType(i));
+                    _ = try self.genType(ty.fnParamType(i));
                 }
 
-                const return_type_id = try self.genType(src, ty.fnReturnType());
+                const return_type_id = try self.genType(ty.fnReturnType());
 
                 // result id + result type id + parameter type ids.
                 try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen()));
@@ -551,7 +552,7 @@ pub const DeclGen = struct {
                 }
             },
             // When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType.
-            .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}),
+            .Pointer => return self.fail("Cannot create pointer with unkown storage class", .{}),
             .Vector => {
                 // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations
                 // which work on them), so simply use those.
@@ -561,7 +562,7 @@ pub const DeclGen = struct {
                 // is adequate at all for this.
 
                 // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems.
-                return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{});
+                return self.fail("TODO: SPIR-V backend: implement type Vector", .{});
             },
             .Null,
             .Undefined,
@@ -573,7 +574,7 @@ pub const DeclGen = struct {
 
             .BoundFn => unreachable, // this type will be deleted from the language.
 
-            else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}),
+            else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}),
         }
 
         try self.spv.types.putNoClobber(ty, result_id);
@@ -582,8 +583,8 @@ pub const DeclGen = struct {
 
     /// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that.
     /// TODO: The result of this needs to be cached.
-    fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId {
-        std.debug.assert(ty.zigTypeTag() == .Pointer);
+    fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId {
+        assert(ty.zigTypeTag() == .Pointer);
 
         const code = &self.spv.binary.types_globals_constants;
         const result_id = self.spv.allocResultId();
@@ -591,7 +592,7 @@ pub const DeclGen = struct {
         // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types
         // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled.
         // These also relate to the pointer's address space.
-        const child_id = try self.genType(src, ty.elemType());
+        const child_id = try self.genType(ty.elemType());
 
         try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id });
 
@@ -602,9 +603,9 @@ pub const DeclGen = struct {
         const decl = self.decl;
         const result_id = decl.fn_link.spirv.id;
 
-        if (decl.val.castTag(.function)) |func_payload| {
-            std.debug.assert(decl.ty.zigTypeTag() == .Fn);
-            const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty);
+        if (decl.val.castTag(.function)) |_| {
+            assert(decl.ty.zigTypeTag() == .Fn);
+            const prototype_id = try self.genType(decl.ty);
             try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{
                 self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype.
                 result_id,
@@ -631,189 +632,167 @@ pub const DeclGen = struct {
             try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id});
             self.current_block_label_id = root_block_id;
 
-            try self.genBody(func_payload.data.body);
+            const main_body = self.air.getMainBody();
+            try self.genBody(main_body);
 
             // Append the actual code into the fn_decls section.
             try self.spv.binary.fn_decls.appendSlice(self.code.items);
             try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{});
         } else {
-            return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
+            return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()});
         }
     }
 
-    fn genBody(self: *DeclGen, body: ir.Body) Error!void {
-        for (body.instructions) |inst| {
+    fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void {
+        for (body) |inst| {
             try self.genInst(inst);
         }
     }
 
-    fn genInst(self: *DeclGen, inst: *Inst) !void {
-        const result_id = switch (inst.tag) {
-            .add, .addwrap => try self.genBinOp(inst.castTag(.add).?),
-            .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?),
-            .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?),
-            .div => try self.genBinOp(inst.castTag(.div).?),
-            .bit_and => try self.genBinOp(inst.castTag(.bit_and).?),
-            .bit_or => try self.genBinOp(inst.castTag(.bit_or).?),
-            .xor => try self.genBinOp(inst.castTag(.xor).?),
-            .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?),
-            .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?),
-            .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?),
-            .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?),
-            .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?),
-            .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?),
-            .bool_and => try self.genBinOp(inst.castTag(.bool_and).?),
-            .bool_or => try self.genBinOp(inst.castTag(.bool_or).?),
-            .not => try self.genUnOp(inst.castTag(.not).?),
-            .alloc => try self.genAlloc(inst.castTag(.alloc).?),
-            .arg => self.genArg(),
-            .block => (try self.genBlock(inst.castTag(.block).?)) orelse return,
-            .br => return try self.genBr(inst.castTag(.br).?),
-            .br_void => return try self.genBrVoid(inst.castTag(.br_void).?),
-            // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them
-            // throughout the IR.
+    fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const air_tags = self.air.instructions.items(.tag);
+        const result_id = switch (air_tags[inst]) {
+            // zig fmt: off
+            .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}),
+            .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}),
+            .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}),
+            .div           => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}),
+
+            .bit_and  => try self.genBinOpSimple(inst, .OpBitwiseAnd),
+            .bit_or   => try self.genBinOpSimple(inst, .OpBitwiseOr),
+            .xor      => try self.genBinOpSimple(inst, .OpBitwiseXor),
+            .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd),
+            .bool_or  => try self.genBinOpSimple(inst, .OpLogicalOr),
+
+            .not => try self.genNot(inst),
+
+            .cmp_eq  => try self.genCmp(inst, .{.OpFOrdEqual,            .OpLogicalEqual,      .OpIEqual}),
+            .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual,         .OpLogicalNotEqual,   .OpINotEqual}),
+            .cmp_gt  => try self.genCmp(inst, .{.OpFOrdGreaterThan,      .OpSGreaterThan,      .OpUGreaterThan}),
+            .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}),
+            .cmp_lt  => try self.genCmp(inst, .{.OpFOrdLessThan,         .OpSLessThan,         .OpULessThan}),
+            .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual,    .OpSLessThanEqual,    .OpULessThanEqual}),
+
+            .arg   => self.genArg(),
+            .alloc => try self.genAlloc(inst),
+            .block => (try self.genBlock(inst)) orelse return,
+            .load  => try self.genLoad(inst),
+
+            .br         => return self.genBr(inst),
             .breakpoint => return,
-            .condbr => return try self.genCondBr(inst.castTag(.condbr).?),
-            .constant => unreachable,
-            .dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?),
-            .load => try self.genLoad(inst.castTag(.load).?),
-            .loop => return try self.genLoop(inst.castTag(.loop).?),
-            .ret => return try self.genRet(inst.castTag(.ret).?),
-            .retvoid => return try self.genRetVoid(),
-            .store => return try self.genStore(inst.castTag(.store).?),
-            .unreach => return try self.genUnreach(),
-            else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}),
+            .condbr     => return self.genCondBr(inst),
+            .constant   => unreachable,
+            .dbg_stmt   => return self.genDbgStmt(inst),
+            .loop       => return self.genLoop(inst),
+            .ret        => return self.genRet(inst),
+            .store      => return self.genStore(inst),
+            .unreach    => return self.genUnreach(),
+            // zig fmt: on
         };
 
         try self.inst_results.putNoClobber(inst, result_id);
     }
 
-    fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
-        // TODO: Will lhs and rhs have the same type?
-        const lhs_id = try self.resolve(inst.lhs);
-        const rhs_id = try self.resolve(inst.rhs);
+    fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId {
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const lhs_id = try self.resolve(bin_op.lhs);
+        const rhs_id = try self.resolve(bin_op.rhs);
+        const result_id = self.spv.allocResultId();
+        const result_type_id = try self.genType(self.air.getType(inst));
+        try writeInstruction(&self.code, opcode, &[_]Word{
+            result_type_id, result_id, lhs_id, rhs_id,
+        });
+        return result_id;
+    }
+
+    fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+        // LHS and RHS are guaranteed to have the same type, and AIR guarantees
+        // the result to be the same as the LHS and RHS, which matches SPIR-V.
+        const ty = self.air.getType(inst);
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const lhs_id = try self.resolve(bin_op.lhs);
+        const rhs_id = try self.resolve(bin_op.rhs);
 
         const result_id = self.spv.allocResultId();
-        const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
-        // TODO: Is the result the same as the argument types?
-        // This is supposed to be the case for SPIR-V.
-        std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
-        std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty));
-
-        // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float
-        // versions of operations require different opcodes.
-        // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand
-        // instead.
-        const info = try self.arithmeticTypeInfo(inst.lhs.ty);
-
-        if (info.class == .composite_integer) {
-            return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
-        } else if (info.class == .strange_integer) {
-            return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{});
-        }
+        const result_type_id = try self.genType(ty);
+
+        assert(self.air.getType(bin_op.lhs).eql(ty));
+        assert(self.air.getType(bin_op.rhs).eql(ty));
 
-        const is_float = info.class == .float;
-        const is_signed = info.signedness == .signed;
-        // **Note**: All these operations must be valid for vectors as well!
-        const opcode = switch (inst.base.tag) {
-            // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers,
-            // we can just switch on both cases here.
-            .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd,
-            .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub,
-            .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul,
-            // TODO: Trap if divisor is 0?
-            // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful.
-            //  => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those.
-            //  => TODO: Figure out how those work on the SPIR-V side.
-            //  => TODO: Test these.
-            .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv,
-            // Only integer versions for these.
-            .bit_and => Opcode.OpBitwiseAnd,
-            .bit_or => Opcode.OpBitwiseOr,
-            .xor => Opcode.OpBitwiseXor,
-            // Bool -> bool operations.
-            .bool_and => Opcode.OpLogicalAnd,
-            .bool_or => Opcode.OpLogicalOr,
+        // Binary operations are generally applicable to both scalar and vector operations
+        // in SPIR-V, but int and float versions of operations require different opcodes.
+        const info = try self.arithmeticTypeInfo(ty);
+
+        const opcode_index: usize = switch (info.class) {
+            .composite_integer => {
+                return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+            },
+            .strange_integer => {
+                return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{});
+            },
+            .integer => switch (info.signedness) {
+                .signed => 1,
+                .unsigned => 2,
+            },
+            .float => 0,
             else => unreachable,
         };
-
+        const opcode = ops[opcode_index];
         try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
 
         // TODO: Trap on overflow? Probably going to be annoying.
         // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap.
 
-        if (info.class != .strange_integer)
-            return result_id;
-
-        return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{});
+        return result_id;
     }
 
-    fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId {
-        const lhs_id = try self.resolve(inst.lhs);
-        const rhs_id = try self.resolve(inst.rhs);
-
+    fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId {
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const lhs_id = try self.resolve(bin_op.lhs);
+        const rhs_id = try self.resolve(bin_op.rhs);
         const result_id = self.spv.allocResultId();
-        const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
-        // All of these operations should be 2 equal types -> bool
-        std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty));
-        std.debug.assert(inst.base.ty.tag() == .bool);
-
-        // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float
-        // versions of operations require different opcodes.
-        // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info
-        // from either of the operands.
-        const info = try self.arithmeticTypeInfo(inst.lhs.ty);
-
-        if (info.class == .composite_integer) {
-            return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{});
-        } else if (info.class == .strange_integer) {
-            return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{});
-        }
+        const result_type_id = try self.genType(Type.initTag(.bool));
+        const op_ty = self.air.getType(bin_op.lhs);
+        assert(op_ty.eql(self.air.getType(bin_op.rhs)));
 
-        const is_bool = info.class == .bool;
-        const is_float = info.class == .float;
-        const is_signed = info.signedness == .signed;
-
-        // **Note**: All these operations must be valid for vectors as well!
-        // For floating points, we generally want ordered operations (which return false if either operand is nan).
-        const opcode = switch (inst.base.tag) {
-            .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual,
-            .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual,
-            // TODO: Verify that these OpFOrd type operations produce the right value.
-            // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type?
-            .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan,
-            .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual,
-            .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan,
-            .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual,
+        // Comparisons are generally applicable to both scalar and vector operations in SPIR-V,
+        // but int and float versions of operations require different opcodes.
+        const info = try self.arithmeticTypeInfo(op_ty);
+
+        const opcode_index: usize = switch (info.class) {
+            .composite_integer => {
+                return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{});
+            },
+            .strange_integer => {
+                return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{});
+            },
+            .float => 0,
+            .bool => 1,
+            .integer => switch (info.signedness) {
+                .signed => 1,
+                .unsigned => 2,
+            },
             else => unreachable,
         };
+        const opcode = ops[opcode_index];
 
         try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id });
         return result_id;
     }
 
-    fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
-        const operand_id = try self.resolve(inst.operand);
-
+    fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+        const operand_id = try self.resolve(ty_op.operand);
         const result_id = self.spv.allocResultId();
-        const result_type_id = try self.genType(inst.base.src, inst.base.ty);
-
-        const opcode = switch (inst.base.tag) {
-            // Bool -> bool
-            .not => Opcode.OpLogicalNot,
-            else => unreachable,
-        };
-
+        const result_type_id = try self.genType(Type.initTag(.bool));
+        const opcode: Opcode = .OpLogicalNot;
         try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id });
-
         return result_id;
     }
 
-    fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId {
+    fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+        const ty = self.air.getType(inst);
         const storage_class = spec.StorageClass.Function;
-        const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class);
+        const result_type_id = try self.genPointerType(ty, storage_class);
         const result_id = self.spv.allocResultId();
 
         // Rather than generating into code here, we're just going to generate directly into the fn_decls section so that
@@ -828,7 +807,7 @@ pub const DeclGen = struct {
         return self.args.items[self.next_arg_index];
     }
 
-    fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId {
+    fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId {
         // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and
         // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up
         // the current block by first generating the code of the block, then a label, and then generate the rest of the current
@@ -848,11 +827,16 @@ pub const DeclGen = struct {
             incoming_blocks.deinit(self.spv.gpa);
         }
 
-        try self.genBody(inst.body);
+        const ty = self.air.getType(inst);
+        const inst_datas = self.air.instructions.items(.data);
+        const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+        const body = self.air.extra[extra.end..][0..extra.data.body_len];
+
+        try self.genBody(body);
         try self.beginSPIRVBlock(label_id);
 
         // If this block didn't produce a value, simply return here.
-        if (!inst.base.ty.hasCodeGenBits())
+        if (!ty.hasCodeGenBits())
             return null;
 
         // Combine the result from the blocks using the Phi instruction.
@@ -862,7 +846,7 @@ pub const DeclGen = struct {
         // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types
         // are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws
         // an error for pointers.
-        const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+        const result_type_id = try self.genType(ty);
         _ = result_type_id;
 
         try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent...
@@ -874,30 +858,26 @@ pub const DeclGen = struct {
         return result_id;
     }
 
-    fn genBr(self: *DeclGen, inst: *Inst.Br) !void {
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
-        const target = self.blocks.get(inst.block).?;
+    fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const br = self.air.instructions.items(.data)[inst].br;
+        const block = self.blocks.get(br.block_inst).?;
+        const operand_ty = self.air.getType(br.operand);
 
-        // TODO: For some reason, br is emitted with void parameters.
-        if (inst.operand.ty.hasCodeGenBits()) {
-            const operand_id = try self.resolve(inst.operand);
+        if (operand_ty.hasCodeGenBits()) {
+            const operand_id = try self.resolve(br.operand);
             // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
-            try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
+            try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
         }
 
-        try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
-    }
-
-    fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void {
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
-        const target = self.blocks.get(inst.block).?;
-        // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway.
-        try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id});
+        try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id});
     }
 
-    fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void {
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
-        const condition_id = try self.resolve(inst.condition);
+    fn genCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+        const cond_br = self.air.extraData(Air.CondBr, pl_op.payload);
+        const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len];
+        const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
+        const condition_id = try self.resolve(pl_op.operand);
 
         // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
         const then_label_id = self.spv.allocResultId();
@@ -913,23 +893,26 @@ pub const DeclGen = struct {
         });
 
         try self.beginSPIRVBlock(then_label_id);
-        try self.genBody(inst.then_body);
+        try self.genBody(then_body);
         try self.beginSPIRVBlock(else_label_id);
-        try self.genBody(inst.else_body);
+        try self.genBody(else_body);
     }
 
-    fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void {
+    fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt;
         const src_fname_id = try self.spv.resolveSourceFileName(self.decl);
-        try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column });
+        try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column });
     }
 
-    fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId {
-        const operand_id = try self.resolve(inst.operand);
+    fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId {
+        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+        const operand_id = try self.resolve(ty_op.operand);
+        const ty = self.air.getType(inst);
 
-        const result_type_id = try self.genType(inst.base.src, inst.base.ty);
+        const result_type_id = try self.genType(ty);
         const result_id = self.spv.allocResultId();
 
-        const operands = if (inst.base.ty.isVolatilePtr())
+        const operands = if (ty.isVolatilePtr())
             &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
         else
             &[_]Word{ result_type_id, result_id, operand_id };
@@ -939,8 +922,9 @@ pub const DeclGen = struct {
         return result_id;
     }
 
-    fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void {
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
+    fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload);
+        const body = self.air.extra[loop.end..][0..loop.data.body_len];
         const loop_label_id = self.spv.allocResultId();
 
         // Jump to the loop entry point
@@ -949,27 +933,29 @@ pub const DeclGen = struct {
         // TODO: Look into OpLoopMerge.
 
         try self.beginSPIRVBlock(loop_label_id);
-        try self.genBody(inst.body);
+        try self.genBody(body);
 
         try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id});
     }
 
-    fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void {
-        const operand_id = try self.resolve(inst.operand);
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
-        try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
-    }
-
-    fn genRetVoid(self: *DeclGen) !void {
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
-        try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+    fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const operand = inst_datas[inst].un_op;
+        const operand_ty = self.air.getType(operand);
+        if (operand_ty.hasCodeGenBits()) {
+            const operand_id = try self.resolve(operand);
+            try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
+        } else {
+            try writeInstruction(&self.code, .OpReturn, &[_]Word{});
+        }
     }
 
-    fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void {
-        const dst_ptr_id = try self.resolve(inst.lhs);
-        const src_val_id = try self.resolve(inst.rhs);
+    fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void {
+        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
+        const dst_ptr_id = try self.resolve(bin_op.lhs);
+        const src_val_id = try self.resolve(bin_op.rhs);
+        const lhs_ty = self.air.getType(bin_op.lhs);
 
-        const operands = if (inst.lhs.ty.isVolatilePtr())
+        const operands = if (lhs_ty.isVolatilePtr())
             &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) }
         else
             &[_]Word{ dst_ptr_id, src_val_id };
@@ -978,7 +964,6 @@ pub const DeclGen = struct {
     }
 
     fn genUnreach(self: *DeclGen) !void {
-        // TODO: This instruction needs to be the last in a block. Is that guaranteed?
         try writeInstruction(&self.code, .OpUnreachable, &[_]Word{});
     }
 };
src/Air.zig
@@ -1,5 +1,7 @@
 //! Analyzed Intermediate Representation.
-//! Sema inputs ZIR and outputs AIR.
+//! This data is produced by Sema and consumed by codegen.
+//! Unlike ZIR where there is one instance for an entire source file, each function
+//! gets its own `Air` instance.
 
 const std = @import("std");
 const Value = @import("value.zig").Value;
@@ -27,38 +29,48 @@ pub const Inst = struct {
     data: Data,
 
     pub const Tag = enum(u8) {
+        /// The first N instructions in Air must be one arg instruction per function parameter.
+        /// Uses the `ty` field.
+        arg,
         /// Float or integer addition. For integers, wrapping is undefined behavior.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         add,
         /// Integer addition. Wrapping is defined to be twos complement wrapping.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         addwrap,
         /// Float or integer subtraction. For integers, wrapping is undefined behavior.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         sub,
         /// Integer subtraction. Wrapping is defined to be twos complement wrapping.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         subwrap,
         /// Float or integer multiplication. For integers, wrapping is undefined behavior.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         mul,
         /// Integer multiplication. Wrapping is defined to be twos complement wrapping.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         mulwrap,
         /// Integer or float division. For integers, wrapping is undefined behavior.
-        /// Result type is the same as both operands.
+        /// Both operands are guaranteed to be the same type, and the result type
+        /// is the same as both operands.
         /// Uses the `bin_op` field.
         div,
         /// Allocates stack local memory.
         /// Uses the `ty` field.
         alloc,
-        /// TODO
+        /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`.
         assembly,
         /// Bitwise AND. `&`.
         /// Result type is the same as both operands.
@@ -80,7 +92,7 @@ pub const Inst = struct {
         /// Uses the `ty_pl` field with payload `Block`.
         block,
         /// Return from a block with a result.
-        /// Result type is always noreturn.
+        /// Result type is always noreturn; no instructions in a block follow this one.
         /// Uses the `br` field.
         br,
         /// Lowers to a hardware trap instruction, or the next best thing.
@@ -109,11 +121,11 @@ pub const Inst = struct {
         /// Uses the `bin_op` field.
         cmp_neq,
         /// Conditional branch.
-        /// Result type is always noreturn.
+        /// Result type is always noreturn; no instructions in a block follow this one.
         /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`.
         cond_br,
         /// Switch branch.
-        /// Result type is always noreturn.
+        /// Result type is always noreturn; no instructions in a block follow this one.
         /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`.
         switch_br,
         /// A comptime-known value. Uses the `ty_pl` field, payload is index of
@@ -166,7 +178,7 @@ pub const Inst = struct {
         load,
         /// A labeled block of code that loops forever. At the end of the body it is implied
         /// to repeat; no explicit "repeat" instruction terminates loop bodies.
-        /// Result type is always noreturn.
+        /// Result type is always noreturn; no instructions in a block follow this one.
         /// Uses the `ty_pl` field. Payload is `Block`.
         loop,
         /// Converts a pointer to its address. Result type is always `usize`.
@@ -178,7 +190,7 @@ pub const Inst = struct {
         /// Uses the `ty_op` field.
         ref,
         /// Return a value from a function.
-        /// Result type is always noreturn.
+        /// Result type is always noreturn; no instructions in a block follow this one.
         /// Uses the `un_op` field.
         ret,
         /// Returns a pointer to a global variable.
@@ -189,7 +201,7 @@ pub const Inst = struct {
         /// Uses the `bin_op` field.
         store,
         /// Indicates the program counter will never get to this instruction.
-        /// Result type is always noreturn.
+        /// Result type is always noreturn; no instructions in a block follow this one.
         unreach,
         /// Convert from one float type to another.
         /// Uses the `ty_op` field.
@@ -343,6 +355,16 @@ pub const StructField = struct {
     field_index: u32,
 };
 
+/// Trailing:
+/// 0. `Ref` for every outputs_len
+/// 1. `Ref` for every inputs_len
+pub const Asm = struct {
+    /// Index to the corresponding ZIR instruction.
+    /// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and
+    /// clobbers are found via here.
+    zir_index: u32,
+};
+
 pub fn getMainBody(air: Air) []const Air.Inst.Index {
     const body_index = air.extra[@enumToInt(ExtraIndex.main_block)];
     const body_len = air.extra[body_index];
@@ -369,3 +391,11 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
         .end = i,
     };
 }
+
+pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void {
+    air.instructions.deinit(gpa);
+    gpa.free(air.extra);
+    gpa.free(air.values);
+    gpa.free(air.variables);
+    air.* = undefined;
+}
src/Compilation.zig
@@ -13,7 +13,7 @@ const target_util = @import("target.zig");
 const Package = @import("Package.zig");
 const link = @import("link.zig");
 const trace = @import("tracy.zig").trace;
-const liveness = @import("liveness.zig");
+const Liveness = @import("Liveness.zig");
 const build_options = @import("build_options");
 const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
 const glibc = @import("glibc.zig");
@@ -1922,6 +1922,7 @@ pub fn getCompileLogOutput(self: *Compilation) []const u8 {
 }
 
 pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void {
+    const gpa = self.gpa;
     // If the terminal is dumb, we dont want to show the user all the
     // output.
     var progress: std.Progress = .{ .dont_print_on_dumb = true };
@@ -2005,7 +2006,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                 assert(decl.has_tv);
                 if (decl.val.castTag(.function)) |payload| {
                     const func = payload.data;
-                    switch (func.state) {
+
+                    var air = switch (func.state) {
                         .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) {
                             error.AnalysisFail => {
                                 assert(func.state != .in_progress);
@@ -2016,18 +2018,39 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                         .in_progress => unreachable,
                         .inline_only => unreachable, // don't queue work for this
                         .sema_failure, .dependency_failure => continue,
-                        .success => {},
-                    }
-                    // Here we tack on additional allocations to the Decl's arena. The allocations
-                    // are lifetime annotations in the ZIR.
-                    var decl_arena = decl.value_arena.?.promote(module.gpa);
-                    defer decl.value_arena.?.* = decl_arena.state;
+                        .success => unreachable, // don't queue it twice
+                    };
+                    defer air.deinit(gpa);
+
                     log.debug("analyze liveness of {s}", .{decl.name});
-                    try liveness.analyze(module.gpa, &decl_arena.allocator, func.body);
+                    var liveness = try Liveness.analyze(gpa, air);
+                    defer liveness.deinit(gpa);
 
                     if (std.builtin.mode == .Debug and self.verbose_air) {
                         func.dump(module.*);
                     }
+
+                    assert(decl.ty.hasCodeGenBits());
+
+                    self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) {
+                        error.OutOfMemory => return error.OutOfMemory,
+                        error.AnalysisFail => {
+                            decl.analysis = .codegen_failure;
+                            continue;
+                        },
+                        else => {
+                            try module.failed_decls.ensureUnusedCapacity(gpa, 1);
+                            module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
+                                gpa,
+                                decl.srcLoc(),
+                                "unable to codegen: {s}",
+                                .{@errorName(err)},
+                            ));
+                            decl.analysis = .codegen_failure_retryable;
+                            continue;
+                        },
+                    };
+                    continue;
                 }
 
                 assert(decl.ty.hasCodeGenBits());
@@ -2039,9 +2062,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                         continue;
                     },
                     else => {
-                        try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1);
+                        try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1);
                         module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
-                            module.gpa,
+                            gpa,
                             decl.srcLoc(),
                             "unable to codegen: {s}",
                             .{@errorName(err)},
@@ -2070,7 +2093,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                     @panic("sadly stage2 is omitted from this build to save memory on the CI server");
                 const module = self.bin_file.options.module.?;
                 const emit_h = module.emit_h.?;
-                _ = try emit_h.decl_table.getOrPut(module.gpa, decl);
+                _ = try emit_h.decl_table.getOrPut(gpa, decl);
                 const decl_emit_h = decl.getEmitH(module);
                 const fwd_decl = &decl_emit_h.fwd_decl;
                 fwd_decl.shrinkRetainingCapacity(0);
@@ -2079,7 +2102,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                     .module = module,
                     .error_msg = null,
                     .decl = decl,
-                    .fwd_decl = fwd_decl.toManaged(module.gpa),
+                    .fwd_decl = fwd_decl.toManaged(gpa),
                     // we don't want to emit optionals and error unions to headers since they have no ABI
                     .typedefs = undefined,
                 };
@@ -2087,14 +2110,14 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
 
                 c_codegen.genHeader(&dg) catch |err| switch (err) {
                     error.AnalysisFail => {
-                        try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?);
+                        try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?);
                         continue;
                     },
                     else => |e| return e,
                 };
 
                 fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
-                fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
+                fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
             },
         },
         .analyze_decl => |decl| {
@@ -2111,9 +2134,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor
                 @panic("sadly stage2 is omitted from this build to save memory on the CI server");
             const module = self.bin_file.options.module.?;
             self.bin_file.updateDeclLineNumber(module, decl) catch |err| {
-                try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1);
+                try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1);
                 module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create(
-                    module.gpa,
+                    gpa,
                     decl.srcLoc(),
                     "unable to update line number: {s}",
                     .{@errorName(err)},
src/Liveness.zig
@@ -150,6 +150,7 @@ fn analyzeInst(
     const gpa = a.gpa;
     const table = &a.table;
     const inst_tags = a.air.instructions.items(.tag);
+    const inst_datas = a.air.instructions.items(.data);
 
     // No tombstone for this instruction means it is never referenced,
     // and its birth marks its own death. Very metal ๐Ÿค˜
src/Module.zig
@@ -739,8 +739,6 @@ pub const Union = struct {
 pub const Fn = struct {
     /// The Decl that corresponds to the function itself.
     owner_decl: *Decl,
-    /// undefined unless analysis state is `success`.
-    body: ir.Body,
     /// The ZIR instruction that is a function instruction. Use this to find
     /// the body. We store this rather than the body directly so that when ZIR
     /// is regenerated on update(), we can map this to the new corresponding
@@ -3585,17 +3583,19 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void {
     mod.gpa.free(kv.value);
 }
 
-pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
+pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air {
     const tracy = trace(@src());
     defer tracy.end();
 
+    const gpa = mod.gpa;
+
     // Use the Decl's arena for function memory.
-    var arena = decl.value_arena.?.promote(mod.gpa);
+    var arena = decl.value_arena.?.promote(gpa);
     defer decl.value_arena.?.* = arena.state;
 
     const fn_ty = decl.ty;
-    const param_inst_list = try mod.gpa.alloc(*ir.Inst, fn_ty.fnParamLen());
-    defer mod.gpa.free(param_inst_list);
+    const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen());
+    defer gpa.free(param_inst_list);
 
     for (param_inst_list) |*param_inst, param_index| {
         const param_type = fn_ty.fnParamType(param_index);
@@ -3615,7 +3615,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
 
     var sema: Sema = .{
         .mod = mod,
-        .gpa = mod.gpa,
+        .gpa = gpa,
         .arena = &arena.allocator,
         .code = zir,
         .owner_decl = decl,
@@ -3626,6 +3626,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
     };
     defer sema.deinit();
 
+    // First few indexes of extra are reserved and set at the end.
+    const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len;
+    try sema.air_extra.ensureTotalCapacity(gpa, reserved_count);
+    sema.air_extra.items.len += reserved_count;
+
     var inner_block: Scope.Block = .{
         .parent = null,
         .sema = &sema,
@@ -3634,20 +3639,29 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void {
         .inlining = null,
         .is_comptime = false,
     };
-    defer inner_block.instructions.deinit(mod.gpa);
+    defer inner_block.instructions.deinit(gpa);
 
     // AIR currently requires the arg parameters to be the first N instructions
-    try inner_block.instructions.appendSlice(mod.gpa, param_inst_list);
+    try inner_block.instructions.appendSlice(gpa, param_inst_list);
 
     func.state = .in_progress;
     log.debug("set {s} to in_progress", .{decl.name});
 
     try sema.analyzeFnBody(&inner_block, func.zir_body_inst);
 
-    const instructions = try arena.allocator.dupe(*ir.Inst, inner_block.instructions.items);
+    // Copy the block into place and mark that as the main block.
+    sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len;
+    try sema.air_extra.appendSlice(inner_block.instructions.items);
+
     func.state = .success;
-    func.body = .{ .instructions = instructions };
     log.debug("set {s} to success", .{decl.name});
+
+    return Air{
+        .instructions = sema.air_instructions.toOwnedSlice(),
+        .extra = sema.air_extra.toOwnedSlice(),
+        .values = sema.air_values.toOwnedSlice(),
+        .variables = sema.air_variables.toOwnedSlice(),
+    };
 }
 
 fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
src/Sema.zig
@@ -1,6 +1,6 @@
 //! Semantic analysis of ZIR instructions.
 //! Shared to every Block. Stored on the stack.
-//! State used for compiling a `Zir` into AIR.
+//! State used for compiling a ZIR into AIR.
 //! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
 //! Does type checking, comptime control flow, and safety-check generation.
 //! This is the the heart of the Zig compiler.
@@ -11,6 +11,10 @@ gpa: *Allocator,
 /// Points to the arena allocator of the Decl.
 arena: *Allocator,
 code: Zir,
+air_instructions: std.MultiArrayList(Air.Inst) = .{},
+air_extra: ArrayListUnmanaged(u32) = .{},
+air_values: ArrayListUnmanaged(Value) = .{},
+air_variables: ArrayListUnmanaged(Module.Var) = .{},
 /// Maps ZIR to AIR.
 inst_map: InstMap = .{},
 /// When analyzing an inline function call, owner_decl is the Decl of the caller
@@ -32,7 +36,7 @@ func: ?*Module.Fn,
 /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function,
 /// > otherwise it is the number of parameters of the function.
 /// > param_count: u32
-param_inst_list: []const *ir.Inst,
+param_inst_list: []const Air.Inst.Index,
 branch_quota: u32 = 1000,
 branch_count: u32 = 0,
 /// This field is updated when a new source location becomes active, so that
@@ -65,10 +69,15 @@ const LazySrcLoc = Module.LazySrcLoc;
 const RangeSet = @import("RangeSet.zig");
 const target_util = @import("target.zig");
 
-pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, *ir.Inst);
+pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index);
 
 pub fn deinit(sema: *Sema) void {
-    sema.inst_map.deinit(sema.gpa);
+    const gpa = sema.gpa;
+    sema.air_instructions.deinit(gpa);
+    sema.air_extra.deinit(gpa);
+    sema.air_values.deinit(gpa);
+    sema.air_variables.deinit(gpa);
+    sema.inst_map.deinit(gpa);
     sema.* = undefined;
 }
 
@@ -108,7 +117,7 @@ pub fn analyzeFnBody(
 /// Returns only the result from the body that is specified.
 /// Only appropriate to call when it is determined at comptime that this body
 /// has no peers.
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!*Inst {
+fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index {
     const break_inst = try sema.analyzeBody(block, body);
     const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand;
     return sema.resolveInst(operand_ref);
@@ -533,7 +542,7 @@ pub fn analyzeBody(
     }
 }
 
-fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const extended = sema.code.instructions.items(.data)[inst].extended;
     switch (extended.opcode) {
         // zig fmt: off
@@ -569,7 +578,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
 }
 
 /// TODO when we rework AIR memory layout, this function will no longer have a possible error.
-pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.Inst {
+pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index {
     var i: usize = @enumToInt(zir_ref);
 
     // First section of indexes correspond to a set number of constant values.
@@ -618,19 +627,19 @@ pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Z
     return sema.resolveAirAsType(block, src, air_inst);
 }
 
-fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: *ir.Inst) !Type {
+fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type {
     const wanted_type = Type.initTag(.@"type");
     const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
     const val = try sema.resolveConstValue(block, src, coerced_inst);
     return val.toType(sema.arena);
 }
 
-fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !Value {
+fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value {
     return (try sema.resolveDefinedValue(block, src, base)) orelse
         return sema.failWithNeededComptime(block, src);
 }
 
-fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !?Value {
+fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value {
     if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| {
         if (val.isUndef()) {
             return sema.failWithUseOfUndef(block, src);
@@ -644,7 +653,7 @@ fn resolvePossiblyUndefinedValue(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    base: *ir.Inst,
+    base: Air.Inst.Index,
 ) !?Value {
     if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| {
         return opv;
@@ -708,13 +717,13 @@ pub fn resolveInstConst(
     };
 }
 
-fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{});
 }
 
-fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = inst;
     const tracy = trace(@src());
     defer tracy.end();
@@ -749,7 +758,7 @@ fn zirStructDecl(
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
     const src: LazySrcLoc = if (small.has_src_node) blk: {
         const node_offset = @bitCast(i32, sema.code.extra[extended.operand]);
@@ -820,7 +829,7 @@ fn zirEnumDecl(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1017,7 +1026,7 @@ fn zirUnionDecl(
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1081,7 +1090,7 @@ fn zirOpaqueDecl(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     name_strategy: Zir.Inst.NameStrategy,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1101,7 +1110,7 @@ fn zirErrorSetDecl(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     name_strategy: Zir.Inst.NameStrategy,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1141,7 +1150,7 @@ fn zirRetPtr(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1153,7 +1162,7 @@ fn zirRetPtr(
     return block.addNoOp(src, ptr_type, .alloc);
 }
 
-fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1166,7 +1175,7 @@ fn zirRetType(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1191,7 +1200,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I
 fn ensureResultUsed(
     sema: *Sema,
     block: *Scope.Block,
-    operand: *Inst,
+    operand: Air.Inst.Index,
     src: LazySrcLoc,
 ) InnerError!void {
     switch (operand.ty.zigTypeTag()) {
@@ -1213,7 +1222,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde
     }
 }
 
-fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1247,7 +1256,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In
     return sema.analyzeLoad(block, src, result_ptr, result_ptr.src);
 }
 
-fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
     const arg_name = inst_data.get(sema.code);
     const arg_index = sema.next_arg_index;
@@ -1269,13 +1278,13 @@ fn zirAllocExtended(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand);
     const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
     return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{});
 }
 
-fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1298,13 +1307,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     });
 }
 
-fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const src_node = sema.code.instructions.items(.data)[inst].node;
     const src: LazySrcLoc = .{ .node_offset = src_node };
     return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{});
 }
 
-fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1317,7 +1326,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*
     return block.addNoOp(var_decl_src, ptr_type, .alloc);
 }
 
-fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1336,7 +1345,7 @@ fn zirAllocInferred(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     inferred_alloc_ty: Type,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1589,7 +1598,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return sema.storePtr(block, src, ptr, value);
 }
 
-fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1625,7 +1634,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return sema.mod.constType(sema.arena, src, param_type);
 }
 
-fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1653,7 +1662,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
     return sema.analyzeDeclRef(block, .unneeded, new_decl);
 }
 
-fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -1662,7 +1671,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
     return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int);
 }
 
-fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -1680,7 +1689,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     });
 }
 
-fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].float;
@@ -1693,7 +1702,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*
     });
 }
 
-fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
@@ -1722,7 +1731,7 @@ fn zirCompileLog(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     var managed = sema.mod.compile_log_text.toManaged(sema.gpa);
     defer sema.mod.compile_log_text = managed.moveToUnmanaged();
     const writer = managed.writer();
@@ -1772,7 +1781,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z
     return sema.panicWithMsg(block, src, msg_inst);
 }
 
-fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1832,12 +1841,12 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     // Loop repetition is implied so the last instruction may or may not be a noreturn instruction.
 
     try child_block.instructions.append(sema.gpa, &loop_inst.base);
-    loop_inst.body = .{ .instructions = try sema.arena.dupe(*Inst, loop_block.instructions.items) };
+    loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) };
 
     return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
 }
 
-fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1847,13 +1856,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn
     return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{});
 }
 
-fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{});
 }
 
-fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1911,7 +1920,7 @@ fn resolveBlockBody(
     child_block: *Scope.Block,
     body: []const Zir.Inst.Index,
     merges: *Scope.Block.Merges,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     _ = try sema.analyzeBody(child_block, body);
     return sema.analyzeBlockBody(parent_block, src, child_block, merges);
 }
@@ -1922,7 +1931,7 @@ fn analyzeBlockBody(
     src: LazySrcLoc,
     child_block: *Scope.Block,
     merges: *Scope.Block.Merges,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -1933,7 +1942,7 @@ fn analyzeBlockBody(
     if (merges.results.items.len == 0) {
         // No need for a block instruction. We can put the new instructions
         // directly into the parent block.
-        const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items);
+        const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items);
         try parent_block.instructions.appendSlice(sema.gpa, copied_instructions);
         return copied_instructions[copied_instructions.len - 1];
     }
@@ -1944,7 +1953,7 @@ fn analyzeBlockBody(
             if (br_block == merges.block_inst) {
                 // No need for a block instruction. We can put the new instructions directly
                 // into the parent block. Here we omit the break instruction.
-                const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]);
+                const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]);
                 try parent_block.instructions.appendSlice(sema.gpa, copied_instructions);
                 return merges.results.items[0];
             }
@@ -1959,7 +1968,7 @@ fn analyzeBlockBody(
     const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items);
     merges.block_inst.base.ty = resolved_ty;
     merges.block_inst.body = .{
-        .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items),
     };
     // Now that the block has its type resolved, we need to go back into all the break
     // instructions, and insert type coercion on the operands.
@@ -1991,7 +2000,7 @@ fn analyzeBlockBody(
             },
             .block = merges.block_inst,
             .body = .{
-                .instructions = try sema.arena.dupe(*Inst, coerce_block.instructions.items),
+                .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items),
             },
         };
     }
@@ -2130,7 +2139,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column);
 }
 
-fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
     const src = inst_data.src();
     const decl_name = inst_data.get(sema.code);
@@ -2138,7 +2147,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.analyzeDeclRef(block, src, decl);
 }
 
-fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
     const src = inst_data.src();
     const decl_name = inst_data.get(sema.code);
@@ -2192,7 +2201,7 @@ fn zirCall(
     inst: Zir.Inst.Index,
     modifier: std.builtin.CallOptions.Modifier,
     ensure_result_used: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2204,7 +2213,7 @@ fn zirCall(
 
     const func = try sema.resolveInst(extra.data.callee);
     // TODO handle function calls of generic functions
-    const resolved_args = try sema.arena.alloc(*Inst, args.len);
+    const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len);
     for (args) |zir_arg, i| {
         // the args are already casted to the result of a param type instruction.
         resolved_args[i] = try sema.resolveInst(zir_arg);
@@ -2216,13 +2225,13 @@ fn zirCall(
 fn analyzeCall(
     sema: *Sema,
     block: *Scope.Block,
-    func: *ir.Inst,
+    func: Air.Inst.Index,
     func_src: LazySrcLoc,
     call_src: LazySrcLoc,
     modifier: std.builtin.CallOptions.Modifier,
     ensure_result_used: bool,
-    args: []const *ir.Inst,
-) InnerError!*ir.Inst {
+    args: []const Air.Inst.Index,
+) InnerError!Air.Inst.Index {
     if (func.ty.zigTypeTag() != .Fn)
         return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty});
 
@@ -2279,7 +2288,7 @@ fn analyzeCall(
     const is_comptime_call = block.is_comptime or modifier == .compile_time;
     const is_inline_call = is_comptime_call or modifier == .always_inline or
         func.ty.fnCallingConvention() == .Inline;
-    const result: *Inst = if (is_inline_call) res: {
+    const result: Air.Inst.Index = if (is_inline_call) res: {
         const func_val = try sema.resolveConstValue(block, func_src, func);
         const module_fn = switch (func_val.tag()) {
             .function => func_val.castTag(.function).?.data,
@@ -2377,7 +2386,7 @@ fn analyzeCall(
     return result;
 }
 
-fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -2389,7 +2398,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.mod.constType(sema.arena, src, ty);
 }
 
-fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2401,7 +2410,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
     return sema.mod.constType(sema.arena, src, opt_type);
 }
 
-fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const array_type = try sema.resolveType(block, src, inst_data.operand);
@@ -2409,7 +2418,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.constType(sema.arena, src, elem_type);
 }
 
-fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -2424,7 +2433,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return sema.mod.constType(sema.arena, src, vector_type);
 }
 
-fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2437,7 +2446,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return sema.mod.constType(sema.arena, .unneeded, array_ty);
 }
 
-fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2452,7 +2461,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index)
     return sema.mod.constType(sema.arena, .unneeded, array_ty);
 }
 
-fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2465,7 +2474,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
     return sema.mod.constType(sema.arena, src, anyframe_type);
 }
 
-fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2486,7 +2495,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
     return sema.mod.constType(sema.arena, src, err_union_ty);
 }
 
-fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -2505,7 +2514,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     });
 }
 
-fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2535,7 +2544,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return block.addUnOp(src, result_ty, .bitcast, op_coerced);
 }
 
-fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2568,7 +2577,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op);
 }
 
-fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2658,7 +2667,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn
     });
 }
 
-fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const tracy = trace(@src());
     defer tracy.end();
@@ -2672,7 +2681,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     });
 }
 
-fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
@@ -2680,7 +2689,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const operand = try sema.resolveInst(inst_data.operand);
 
-    const enum_tag: *Inst = switch (operand.ty.zigTypeTag()) {
+    const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) {
         .Enum => operand,
         .Union => {
             //if (!operand.ty.unionHasTag()) {
@@ -2754,7 +2763,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag);
 }
 
-fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const target = mod.getTarget();
     const arena = sema.arena;
@@ -2815,7 +2824,7 @@ fn zirOptionalPayloadPtr(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2858,7 +2867,7 @@ fn zirOptionalPayload(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2896,7 +2905,7 @@ fn zirErrUnionPayload(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2930,7 +2939,7 @@ fn zirErrUnionPayloadPtr(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     safety_check: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2969,7 +2978,7 @@ fn zirErrUnionPayloadPtr(
 }
 
 /// Value in, value out
-fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -2995,7 +3004,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner
 }
 
 /// Pointer in, value out
-fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3042,7 +3051,7 @@ fn zirFunc(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     inferred_error_set: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3093,7 +3102,7 @@ fn funcCommon(
     is_extern: bool,
     src_locs: Zir.Inst.Func.SrcLocs,
     opt_lib_name: ?[]const u8,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = src_node_offset };
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
     const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type);
@@ -3234,7 +3243,7 @@ fn funcCommon(
     return result;
 }
 
-fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3242,7 +3251,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Ins
     return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs);
 }
 
-fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3258,13 +3267,13 @@ fn analyzeAs(
     src: LazySrcLoc,
     zir_dest_type: Zir.Inst.Ref,
     zir_operand: Zir.Inst.Ref,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const dest_type = try sema.resolveType(block, src, zir_dest_type);
     const operand = try sema.resolveInst(zir_operand);
     return sema.coerce(block, dest_type, operand, src);
 }
 
-fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3281,7 +3290,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return block.addUnOp(src, ty, .ptrtoint, ptr);
 }
 
-fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3299,7 +3308,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.analyzeLoad(block, src, result_ptr, result_ptr.src);
 }
 
-fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3312,7 +3321,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src);
 }
 
-fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3327,7 +3336,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.analyzeLoad(block, src, result_ptr, src);
 }
 
-fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3340,7 +3349,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src);
 }
 
-fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3383,7 +3392,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{});
 }
 
-fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3396,7 +3405,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.bitcast(block, dest_type, operand);
 }
 
-fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3439,7 +3448,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr
     return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{});
 }
 
-fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3454,7 +3463,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.analyzeLoad(block, sema.src, result_ptr, sema.src);
 }
 
-fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3472,7 +3481,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     return sema.analyzeLoad(block, src, result_ptr, src);
 }
 
-fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3482,7 +3491,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src);
 }
 
-fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3495,7 +3504,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE
     return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src);
 }
 
-fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3508,7 +3517,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded);
 }
 
-fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3522,7 +3531,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded);
 }
 
-fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3544,7 +3553,7 @@ fn zirSwitchCapture(
     inst: Zir.Inst.Index,
     is_multi: bool,
     is_ref: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3563,7 +3572,7 @@ fn zirSwitchCaptureElse(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_ref: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3582,7 +3591,7 @@ fn zirSwitchBlock(
     inst: Zir.Inst.Index,
     is_ref: bool,
     special_prong: Zir.SpecialProng,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3615,7 +3624,7 @@ fn zirSwitchBlockMulti(
     inst: Zir.Inst.Index,
     is_ref: bool,
     special_prong: Zir.SpecialProng,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -3645,14 +3654,14 @@ fn zirSwitchBlockMulti(
 fn analyzeSwitch(
     sema: *Sema,
     block: *Scope.Block,
-    operand: *Inst,
+    operand: Air.Inst.Index,
     extra_end: usize,
     special_prong: Zir.SpecialProng,
     scalar_cases_len: usize,
     multi_cases_len: usize,
     switch_inst: Zir.Inst.Index,
     src_node_offset: i32,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const gpa = sema.gpa;
     const mod = sema.mod;
 
@@ -4187,7 +4196,7 @@ fn analyzeSwitch(
 
         cases[scalar_i] = .{
             .item = item_val,
-            .body = .{ .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items) },
+            .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) },
         };
     }
 
@@ -4207,7 +4216,7 @@ fn analyzeSwitch(
 
         case_block.instructions.shrinkRetainingCapacity(0);
 
-        var any_ok: ?*Inst = null;
+        var any_ok: ?Air.Inst.Index = null;
         const bool_ty = comptime Type.initTag(.bool);
 
         for (items) |item_ref| {
@@ -4280,7 +4289,7 @@ fn analyzeSwitch(
         try case_block.instructions.append(gpa, &new_condbr.base);
 
         const cond_body: Body = .{
-            .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items),
+            .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
         };
 
         case_block.instructions.shrinkRetainingCapacity(0);
@@ -4288,7 +4297,7 @@ fn analyzeSwitch(
         extra_index += body_len;
         _ = try sema.analyzeBody(&case_block, body);
         new_condbr.then_body = .{
-            .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items),
+            .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
         };
         if (prev_condbr) |condbr| {
             condbr.else_body = cond_body;
@@ -4303,7 +4312,7 @@ fn analyzeSwitch(
             case_block.instructions.shrinkRetainingCapacity(0);
             _ = try sema.analyzeBody(&case_block, special.body);
             const else_body: Body = .{
-                .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items),
+                .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items),
             };
             if (prev_condbr) |condbr| {
                 condbr.else_body = else_body;
@@ -4507,7 +4516,7 @@ fn validateSwitchNoRange(
     return sema.mod.failWithOwnedErrorMsg(&block.base, msg);
 }
 
-fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     _ = extra;
@@ -4516,7 +4525,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{});
 }
 
-fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -4541,7 +4550,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return mod.constBool(arena, src, false);
 }
 
-fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4566,13 +4575,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return mod.constType(sema.arena, src, file_root_decl.ty);
 }
 
-fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     _ = inst;
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{});
 }
 
-fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4581,7 +4590,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{});
 }
 
-fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4594,7 +4603,7 @@ fn zirBitwise(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     ir_tag: ir.Inst.Tag,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4606,7 +4615,7 @@ fn zirBitwise(
     const lhs = try sema.resolveInst(extra.lhs);
     const rhs = try sema.resolveInst(extra.rhs);
 
-    const instructions = &[_]*Inst{ lhs, rhs };
+    const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -4652,7 +4661,7 @@ fn zirBitwise(
     return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
 }
 
-fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4660,7 +4669,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{});
 }
 
-fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4668,7 +4677,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{});
 }
 
-fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4681,7 +4690,7 @@ fn zirNegate(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     tag_override: Zir.Inst.Tag,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4695,7 +4704,7 @@ fn zirNegate(
     return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src);
 }
 
-fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4715,7 +4724,7 @@ fn zirOverflowArithmetic(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4729,13 +4738,13 @@ fn analyzeArithmetic(
     sema: *Sema,
     block: *Scope.Block,
     zir_tag: Zir.Inst.Tag,
-    lhs: *Inst,
-    rhs: *Inst,
+    lhs: Air.Inst.Index,
+    rhs: Air.Inst.Index,
     src: LazySrcLoc,
     lhs_src: LazySrcLoc,
     rhs_src: LazySrcLoc,
-) InnerError!*Inst {
-    const instructions = &[_]*Inst{ lhs, rhs };
+) InnerError!Air.Inst.Index {
+    const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
     const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
@@ -4844,7 +4853,7 @@ fn analyzeArithmetic(
     return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs);
 }
 
-fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4859,7 +4868,7 @@ fn zirAsm(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -4899,7 +4908,7 @@ fn zirAsm(
         };
     };
 
-    const args = try sema.arena.alloc(*Inst, inputs_len);
+    const args = try sema.arena.alloc(Air.Inst.Index, inputs_len);
     const inputs = try sema.arena.alloc([]const u8, inputs_len);
 
     for (args) |*arg, arg_i| {
@@ -4943,7 +4952,7 @@ fn zirCmp(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     op: std.math.CompareOperator,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5009,7 +5018,7 @@ fn zirCmp(
         return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) == (op == .eq));
     }
 
-    const instructions = &[_]*Inst{ lhs, rhs };
+    const instructions = &[_]Air.Inst.Index{ lhs, rhs };
     const resolved_type = try sema.resolvePeerTypes(block, src, instructions);
     if (!resolved_type.isSelfComparable(is_equality_cmp)) {
         return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type});
@@ -5041,7 +5050,7 @@ fn zirCmp(
     return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs);
 }
 
-fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5051,7 +5060,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size);
 }
 
-fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
@@ -5065,7 +5074,7 @@ fn zirThis(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{});
 }
@@ -5074,7 +5083,7 @@ fn zirRetAddr(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{});
 }
@@ -5083,12 +5092,12 @@ fn zirBuiltinSrc(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{});
 }
 
-fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     const ty = try sema.resolveType(block, src, inst_data.operand);
@@ -5131,7 +5140,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     }
 }
 
-fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const zir_datas = sema.code.instructions.items(.data);
     const inst_data = zir_datas[inst].un_node;
@@ -5140,7 +5149,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!
     return sema.mod.constType(sema.arena, src, operand.ty);
 }
 
-fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     _ = block;
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
@@ -5149,13 +5158,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr
     return sema.mod.constType(sema.arena, src, elem_ty);
 }
 
-fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{});
 }
 
-fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{});
@@ -5165,7 +5174,7 @@ fn zirTypeofPeer(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5173,7 +5182,7 @@ fn zirTypeofPeer(
     const src: LazySrcLoc = .{ .node_offset = extra.data.src_node };
     const args = sema.code.refSlice(extra.end, extended.small);
 
-    const inst_list = try sema.gpa.alloc(*ir.Inst, args.len);
+    const inst_list = try sema.gpa.alloc(Air.Inst.Index, args.len);
     defer sema.gpa.free(inst_list);
 
     for (args) |arg_ref, i| {
@@ -5184,7 +5193,7 @@ fn zirTypeofPeer(
     return sema.mod.constType(sema.arena, src, result_type);
 }
 
-fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5206,7 +5215,7 @@ fn zirBoolOp(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     comptime is_bool_or: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5237,7 +5246,7 @@ fn zirBoolBr(
     parent_block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_bool_or: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5292,12 +5301,12 @@ fn zirBoolBr(
     const rhs_result = try sema.resolveBody(rhs_block, body);
     _ = try rhs_block.addBr(src, block_inst, rhs_result);
 
-    const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, then_block.instructions.items) };
-    const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, else_block.instructions.items) };
+    const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) };
+    const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) };
     _ = try child_block.addCondBr(src, lhs, air_then_body, air_else_body);
 
     block_inst.body = .{
-        .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items),
     };
     try parent_block.instructions.append(sema.gpa, &block_inst.base);
     return &block_inst.base;
@@ -5307,7 +5316,7 @@ fn zirIsNonNull(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5321,7 +5330,7 @@ fn zirIsNonNullPtr(
     sema: *Sema,
     block: *Scope.Block,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5332,7 +5341,7 @@ fn zirIsNonNullPtr(
     return sema.analyzeIsNull(block, src, loaded, true);
 }
 
-fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5341,7 +5350,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return sema.analyzeIsNonErr(block, inst_data.src(), operand);
 }
 
-fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5385,14 +5394,14 @@ fn zirCondbr(
 
     _ = try sema.analyzeBody(&sub_block, then_body);
     const air_then_body: ir.Body = .{
-        .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items),
     };
 
     sub_block.instructions.shrinkRetainingCapacity(0);
 
     _ = try sema.analyzeBody(&sub_block, else_body);
     const air_else_body: ir.Body = .{
-        .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items),
+        .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items),
     };
 
     _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body);
@@ -5470,7 +5479,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
 fn analyzeRet(
     sema: *Sema,
     block: *Scope.Block,
-    operand: *Inst,
+    operand: Air.Inst.Index,
     src: LazySrcLoc,
     need_coercion: bool,
 ) InnerError!Zir.Inst.Index {
@@ -5505,7 +5514,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool {
     };
 }
 
-fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5526,7 +5535,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne
     return sema.mod.constType(sema.arena, .unneeded, ty);
 }
 
-fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5580,7 +5589,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError
     return sema.mod.constType(sema.arena, src, ty);
 }
 
-fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -5594,13 +5603,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In
     });
 }
 
-fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{});
 }
 
-fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const gpa = sema.gpa;
     const zir_datas = sema.code.instructions.items(.data);
@@ -5622,7 +5631,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     mem.set(Zir.Inst.Index, found_fields, 0);
 
     // The init values to use for the struct instance.
-    const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.count());
+    const field_inits = try gpa.alloc(Air.Inst.Index, struct_obj.fields.count());
     defer gpa.free(field_inits);
 
     var field_i: u32 = 0;
@@ -5713,7 +5722,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{});
 }
 
-fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5721,7 +5730,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{});
 }
 
-fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5729,7 +5738,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref:
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{});
 }
 
-fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst {
+fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5737,13 +5746,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{});
 }
 
-fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{});
 }
 
-fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data;
     const src = inst_data.src();
@@ -5765,7 +5774,7 @@ fn zirErrorReturnTrace(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{});
 }
@@ -5774,7 +5783,7 @@ fn zirFrame(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{});
 }
@@ -5783,84 +5792,84 @@ fn zirFrameAddress(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) };
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{});
 }
 
-fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{});
 }
 
-fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{});
 }
 
-fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{});
 }
 
-fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{});
 }
 
-fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{});
 }
 
-fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{});
 }
 
-fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{});
 }
 
-fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{});
 }
 
-fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{});
 }
 
-fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{});
 }
 
-fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{});
 }
 
-fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{});
 }
 
-fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
 
@@ -5923,199 +5932,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro
     return block.addUnOp(src, type_res, .bitcast, operand_coerced);
 }
 
-fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{});
 }
 
-fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{});
 }
 
-fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{});
 }
 
-fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{});
 }
 
-fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{});
 }
 
-fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{});
 }
 
-fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{});
 }
 
-fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{});
 }
 
-fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{});
 }
 
-fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{});
 }
 
-fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{});
 }
 
-fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{});
 }
 
-fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{});
 }
 
-fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{});
 }
 
-fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{});
 }
 
-fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{});
 }
 
-fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{});
 }
 
-fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{});
 }
 
-fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{});
 }
 
-fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{});
 }
 
-fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{});
 }
 
-fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{});
 }
 
-fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{});
 }
 
-fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{});
 }
 
-fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{});
 }
 
-fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{});
 }
 
-fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{});
 }
 
-fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{});
 }
 
-fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{});
 }
 
-fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{});
 }
 
-fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{});
 }
 
-fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{});
 }
 
-fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst {
+fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
     return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{});
@@ -6126,7 +6135,7 @@ fn zirAwait(
     block: *Scope.Block,
     inst: Zir.Inst.Index,
     is_nosuspend: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
     const src = inst_data.src();
 
@@ -6138,7 +6147,7 @@ fn zirVarExtended(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand);
     const src = sema.src;
     const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type
@@ -6204,7 +6213,7 @@ fn zirFuncExtended(
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
     inst: Zir.Inst.Index,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const tracy = trace(@src());
     defer tracy.end();
 
@@ -6271,7 +6280,7 @@ fn zirCUndef(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{});
@@ -6281,7 +6290,7 @@ fn zirCInclude(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{});
@@ -6291,7 +6300,7 @@ fn zirCDefine(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{});
@@ -6301,7 +6310,7 @@ fn zirWasmMemorySize(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{});
@@ -6311,7 +6320,7 @@ fn zirWasmMemoryGrow(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{});
@@ -6321,7 +6330,7 @@ fn zirBuiltinExtern(
     sema: *Sema,
     block: *Scope.Block,
     extended: Zir.Inst.Extended.InstData,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data;
     const src: LazySrcLoc = .{ .node_offset = extra.node };
     return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{});
@@ -6355,7 +6364,7 @@ pub const PanicId = enum {
     invalid_error_code,
 };
 
-fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
+fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void {
     const block_inst = try sema.arena.create(Inst.Block);
     block_inst.* = .{
         .base = .{
@@ -6364,12 +6373,12 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
             .src = ok.src,
         },
         .body = .{
-            .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the condbr.
+            .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr.
         },
     };
 
     const ok_body: ir.Body = .{
-        .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the br_void.
+        .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void.
     };
     const br_void = try sema.arena.create(Inst.BrVoid);
     br_void.* = .{
@@ -6395,7 +6404,7 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id:
 
     _ = try sema.safetyPanic(&fail_block, ok.src, panic_id);
 
-    const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, fail_block.instructions.items) };
+    const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) };
 
     const condbr = try sema.arena.create(Inst.CondBr);
     condbr.* = .{
@@ -6417,7 +6426,7 @@ fn panicWithMsg(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    msg_inst: *ir.Inst,
+    msg_inst: Air.Inst.Index,
 ) !Zir.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
@@ -6438,7 +6447,7 @@ fn panicWithMsg(
         .ty = try mod.optionalType(arena, ptr_stack_trace_ty),
         .val = Value.initTag(.null_value),
     });
-    const args = try arena.create([2]*ir.Inst);
+    const args = try arena.create([2]Air.Inst.Index);
     args.* = .{ msg_inst, null_stack_trace };
     _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args);
     return always_noreturn;
@@ -6494,10 +6503,10 @@ fn namedFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    object_ptr: *Inst,
+    object_ptr: Air.Inst.Index,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
 
@@ -6647,7 +6656,7 @@ fn analyzeNamespaceLookup(
     src: LazySrcLoc,
     namespace: *Scope.Namespace,
     decl_name: []const u8,
-) InnerError!?*Inst {
+) InnerError!?Air.Inst.Index {
     const mod = sema.mod;
     const gpa = sema.gpa;
     if (try sema.lookupInNamespace(namespace, decl_name)) |decl| {
@@ -6671,11 +6680,11 @@ fn analyzeStructFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    struct_ptr: *Inst,
+    struct_ptr: Air.Inst.Index,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
     unresolved_struct_ty: Type,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
     assert(unresolved_struct_ty.zigTypeTag() == .Struct);
@@ -6706,11 +6715,11 @@ fn analyzeUnionFieldPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    union_ptr: *Inst,
+    union_ptr: Air.Inst.Index,
     field_name: []const u8,
     field_name_src: LazySrcLoc,
     unresolved_union_ty: Type,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const arena = sema.arena;
     assert(unresolved_union_ty.zigTypeTag() == .Union);
@@ -6743,10 +6752,10 @@ fn elemPtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: *Inst,
-    elem_index: *Inst,
+    array_ptr: Air.Inst.Index,
+    elem_index: Air.Inst.Index,
     elem_index_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const array_ty = switch (array_ptr.ty.zigTypeTag()) {
         .Pointer => array_ptr.ty.elemType(),
         else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}),
@@ -6770,10 +6779,10 @@ fn elemPtrArray(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: *Inst,
-    elem_index: *Inst,
+    array_ptr: Air.Inst.Index,
+    elem_index: Air.Inst.Index,
     elem_index_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     if (array_ptr.value()) |array_ptr_val| {
         if (elem_index.value()) |index_val| {
             // Both array pointer and index are compile-time known.
@@ -6798,9 +6807,9 @@ fn coerce(
     sema: *Sema,
     block: *Scope.Block,
     dest_type: Type,
-    inst: *Inst,
+    inst: Air.Inst.Index,
     inst_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     if (dest_type.tag() == .var_args_param) {
         return sema.coerceVarArgParam(block, inst);
     }
@@ -6976,7 +6985,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult
     return .no_match;
 }
 
-fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) InnerError!?*Inst {
+fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index {
     const val = inst.value() orelse return null;
     const src_zig_tag = inst.ty.zigTypeTag();
     const dst_zig_tag = dest_type.zigTypeTag();
@@ -7014,7 +7023,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) Inn
     return null;
 }
 
-fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: *Inst) !*Inst {
+fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index {
     switch (inst.ty.zigTypeTag()) {
         .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}),
         else => {},
@@ -7027,8 +7036,8 @@ fn storePtr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    ptr: *Inst,
-    uncasted_value: *Inst,
+    ptr: Air.Inst.Index,
+    uncasted_value: Air.Inst.Index,
 ) !void {
     if (ptr.ty.isConstPtr())
         return sema.mod.fail(&block.base, src, "cannot assign to constant", .{});
@@ -7076,7 +7085,7 @@ fn storePtr(
     _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value);
 }
 
-fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst {
+fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
     if (inst.value()) |val| {
         // Keep the comptime Value representation; take the new type.
         return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
@@ -7086,7 +7095,7 @@ fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Ins
     return block.addUnOp(inst.src, dest_type, .bitcast, inst);
 }
 
-fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst {
+fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
     if (inst.value()) |val| {
         // The comptime Value representation is compatible with both types.
         return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
@@ -7094,7 +7103,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst
     return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{});
 }
 
-fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst {
+fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
     if (inst.value()) |val| {
         // The comptime Value representation is compatible with both types.
         return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
@@ -7102,12 +7111,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst:
     return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{});
 }
 
-fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst {
+fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index {
     const decl_ref = try sema.analyzeDeclRef(block, src, decl);
     return sema.analyzeLoad(block, src, decl_ref, src);
 }
 
-fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst {
+fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index {
     try sema.mod.declareDeclDependency(sema.owner_decl, decl);
     sema.mod.ensureDeclAnalyzed(decl) catch |err| {
         if (sema.func) |func| {
@@ -7128,7 +7137,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl
     });
 }
 
-fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!*Inst {
+fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index {
     const variable = tv.val.castTag(.variable).?.data;
 
     const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One);
@@ -7157,8 +7166,8 @@ fn analyzeRef(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    operand: *Inst,
-) InnerError!*Inst {
+    operand: Air.Inst.Index,
+) InnerError!Air.Inst.Index {
     const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One);
 
     if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| {
@@ -7176,9 +7185,9 @@ fn analyzeLoad(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    ptr: *Inst,
+    ptr: Air.Inst.Index,
     ptr_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const elem_ty = switch (ptr.ty.zigTypeTag()) {
         .Pointer => ptr.ty.elemType(),
         else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}),
@@ -7201,9 +7210,9 @@ fn analyzeIsNull(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    operand: *Inst,
+    operand: Air.Inst.Index,
     invert_logic: bool,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const result_ty = Type.initTag(.bool);
     if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| {
         if (opt_val.isUndef()) {
@@ -7222,8 +7231,8 @@ fn analyzeIsNonErr(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    operand: *Inst,
-) InnerError!*Inst {
+    operand: Air.Inst.Index,
+) InnerError!Air.Inst.Index {
     const ot = operand.ty.zigTypeTag();
     if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true);
     if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false);
@@ -7243,12 +7252,12 @@ fn analyzeSlice(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    array_ptr: *Inst,
-    start: *Inst,
-    end_opt: ?*Inst,
-    sentinel_opt: ?*Inst,
+    array_ptr: Air.Inst.Index,
+    start: Air.Inst.Index,
+    end_opt: ?Air.Inst.Index,
+    sentinel_opt: ?Air.Inst.Index,
     sentinel_src: LazySrcLoc,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
         .Pointer => array_ptr.ty.elemType(),
         else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}),
@@ -7319,10 +7328,10 @@ fn cmpNumeric(
     sema: *Sema,
     block: *Scope.Block,
     src: LazySrcLoc,
-    lhs: *Inst,
-    rhs: *Inst,
+    lhs: Air.Inst.Index,
+    rhs: Air.Inst.Index,
     op: std.math.CompareOperator,
-) InnerError!*Inst {
+) InnerError!Air.Inst.Index {
     assert(lhs.ty.isNumeric());
     assert(rhs.ty.isNumeric());
 
@@ -7488,7 +7497,7 @@ fn cmpNumeric(
     return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
 }
 
-fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst {
+fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
     if (inst.value()) |val| {
         return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val });
     }
@@ -7497,7 +7506,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst)
     return block.addUnOp(inst.src, dest_type, .wrap_optional, inst);
 }
 
-fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst {
+fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index {
     const err_union = dest_type.castTag(.error_union).?;
     if (inst.value()) |val| {
         if (inst.ty.zigTypeTag() != .ErrorSet) {
@@ -7568,7 +7577,7 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst
     }
 }
 
-fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []*Inst) !Type {
+fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type {
     if (instructions.len == 0)
         return Type.initTag(.noreturn);
 
@@ -7704,7 +7713,7 @@ fn getBuiltin(
     block: *Scope.Block,
     src: LazySrcLoc,
     name: []const u8,
-) InnerError!*ir.Inst {
+) InnerError!Air.Inst.Index {
     const mod = sema.mod;
     const std_pkg = mod.root_pkg.table.get("std").?;
     const std_file = (mod.importPkg(std_pkg) catch unreachable).file;
BRANCH_TODO
@@ -1,24 +1,6 @@
  * be sure to test debug info of parameters
 
 
-    /// Each bit represents the index of an `Inst` parameter in the `args` field.
-    /// If a bit is set, it marks the end of the lifetime of the corresponding
-    /// instruction parameter. For example, 0b101 means that the first and
-    /// third `Inst` parameters' lifetimes end after this instruction, and will
-    /// not have any more following references.
-    /// The most significant bit being set means that the instruction itself is
-    /// never referenced, in other words its lifetime ends as soon as it finishes.
-    /// If bit 15 (0b1xxx_xxxx_xxxx_xxxx) is set, it means this instruction itself is unreferenced.
-    /// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the
-    /// lifetimes of operands are encoded elsewhere.
-    deaths: DeathsInt = undefined,
-
-
-    pub const DeathsInt = u16;
-    pub const DeathsBitIndex = std.math.Log2Int(DeathsInt);
-    pub const unreferenced_bit_index = @typeInfo(DeathsInt).Int.bits - 1;
-    pub const deaths_bits = unreferenced_bit_index - 1;
-
     pub fn isUnused(self: Inst) bool {
         return (self.deaths & (1 << unreferenced_bit_index)) != 0;
     }
@@ -115,32 +97,6 @@
 
 
 
-    pub const Assembly = struct {
-        pub const base_tag = Tag.assembly;
-
-        base: Inst,
-        asm_source: []const u8,
-        is_volatile: bool,
-        output_constraint: ?[]const u8,
-        inputs: []const []const u8,
-        clobbers: []const []const u8,
-        args: []const *Inst,
-
-        pub fn operandCount(self: *const Assembly) usize {
-            return self.args.len;
-        }
-        pub fn getOperand(self: *const Assembly, index: usize) ?*Inst {
-            if (index < self.args.len)
-                return self.args[index];
-            return null;
-        }
-    };
-
-    pub const StructFieldPtr = struct {
-        struct_ptr: *Inst,
-        field_index: usize,
-    };
-
 
 /// For debugging purposes, prints a function representation to stderr.
 pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void {