Commit 4168b01e7a

joachimschmidt557 <joachim.schmidt557@outlook.com>
2021-11-10 19:47:56
stage2 AArch64: implement airCondBr
1 parent a5a012e
Changed files (4)
src/arch/aarch64/CodeGen.zig
@@ -177,7 +177,7 @@ const StackAllocation = struct {
 };
 
 const BlockData = struct {
-    relocs: std.ArrayListUnmanaged(Reloc),
+    relocs: std.ArrayListUnmanaged(Mir.Inst.Index),
     /// The first break instruction encounters `null` here and chooses a
     /// machine code value for the block result, populating this field.
     /// Following break instructions encounter that value and use it for
@@ -185,18 +185,6 @@ const BlockData = struct {
     mcv: MCValue,
 };
 
-const Reloc = union(enum) {
-    /// The value is an offset into the `Function` `code` from the beginning.
-    /// To perform the reloc, write 32-bit signed little-endian integer
-    /// which is a relative jump, based on the address following the reloc.
-    rel32: usize,
-    /// A branch in the ARM instruction set
-    arm_branch: struct {
-        pos: usize,
-        cond: @import("../arm/bits.zig").Condition,
-    },
-};
-
 const BigTomb = struct {
     function: *Self,
     inst: Air.Inst.Index,
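With the Reloc union gone, a relocation is now simply the Mir index of the branch instruction that needs patching. The pattern, as used by airCondBr and brVoid later in this diff, is roughly: emit the branch with an undefined target, remember its index, and patch it once the target is known. A minimal sketch:

    // Record: emit the branch before its target exists.
    const reloc = try self.addInst(.{
        .tag = .b,
        .data = .{ .inst = undefined }, // target not known yet
    });
    // ... emit the code the branch should jump over ...
    try self.performReloc(reloc); // point the branch at the next MIR instruction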
@@ -426,6 +414,12 @@ fn gen(self: *Self) !void {
             });
         }
 
+        // add sp, sp, #stack_size
+        _ = try self.addInst(.{
+            .tag = .add_immediate,
+            .data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = @intCast(u12, aligned_stack_end) } },
+        });
+
         // ldp fp, lr, [sp], #16
         _ = try self.addInst(.{
             .tag = .ldp,
@@ -437,12 +431,6 @@ fn gen(self: *Self) !void {
             } },
         });
 
-        // add sp, sp, #stack_size
-        _ = try self.addInst(.{
-            .tag = .add_immediate,
-            .data = .{ .rr_imm12_sh = .{ .rd = .xzr, .rn = .xzr, .imm12 = @intCast(u12, aligned_stack_end) } },
-        });
-
         // ret lr
         _ = try self.addInst(.{
             .tag = .ret,
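The reordering here matters: the epilogue must unwind in the reverse order of the prologue. With the old order, the post-indexed ldp would pop fp/lr out of the middle of the local area instead of restoring the saved pair. (Register 31, spelled .xzr in the backend's Register enum, encodes sp in the add/sub-immediate forms, so this really is `add sp, sp, #stack_size`.) Assuming the standard frame setup, the intended sequence is:

    // Prologue (for reference):
    //     stp  fp, lr, [sp, #-16]!   // save frame pointer and link register
    //     sub  sp, sp, #stack_size   // allocate locals
    // Epilogue, now in reverse order:
    //     add  sp, sp, #stack_size   // deallocate locals first
    //     ldp  fp, lr, [sp], #16     // then restore fp/lr and pop the pair
    //     ret  lr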
@@ -1358,7 +1346,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
             const stack_offset = try self.allocMem(inst, abi_size, abi_align);
             try self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
 
-            break :blk MCValue{ .stack_offset = stack_offset };
+            // TODO correct loading and storing from memory
+            // break :blk MCValue{ .stack_offset = stack_offset };
+            break :blk result;
         },
         else => result,
     };
@@ -1734,9 +1724,153 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
 }
 
 fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
-    _ = inst;
+    const pl_op = self.air.instructions.items(.data)[inst].pl_op;
+    const cond = try self.resolveInst(pl_op.operand);
+    const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+    const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
+    const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len];
+    const liveness_condbr = self.liveness.getCondBr(inst);
+
+    const reloc: Mir.Inst.Index = switch (cond) {
+        .compare_flags_signed,
+        .compare_flags_unsigned,
+        => try self.addInst(.{
+            .tag = .b_cond,
+            .data = .{
+                .inst_cond = .{
+                    .inst = undefined, // populated later through performReloc
+                    .cond = switch (cond) {
+                        .compare_flags_signed => |cmp_op| blk: {
+                            // Here we map to the opposite condition because the jump is to the false branch.
+                            const condition = Instruction.Condition.fromCompareOperatorSigned(cmp_op);
+                            break :blk condition.negate();
+                        },
+                        .compare_flags_unsigned => |cmp_op| blk: {
+                            // Here we map to the opposite condition because the jump is to the false branch.
+                            const condition = Instruction.Condition.fromCompareOperatorUnsigned(cmp_op);
+                            break :blk condition.negate();
+                        },
+                        else => unreachable,
+                    },
+                },
+            },
+        }),
+        else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(cond)}),
+    };
+
+    // Capture the register and stack allocation state so that we can revert to it.
+    const parent_next_stack_offset = self.next_stack_offset;
+    const parent_free_registers = self.register_manager.free_registers;
+    var parent_stack = try self.stack.clone(self.gpa);
+    defer parent_stack.deinit(self.gpa);
+    const parent_registers = self.register_manager.registers;
 
-    return self.fail("TODO implement condbr {}", .{self.target.cpu.arch});
+    try self.branch_stack.append(.{});
+
+    try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
+    for (liveness_condbr.then_deaths) |operand| {
+        self.processDeath(operand);
+    }
+    try self.genBody(then_body);
+
+    // Revert to the previous register and stack allocation state.
+
+    var saved_then_branch = self.branch_stack.pop();
+    defer saved_then_branch.deinit(self.gpa);
+
+    self.register_manager.registers = parent_registers;
+
+    self.stack.deinit(self.gpa);
+    self.stack = parent_stack;
+    parent_stack = .{};
+
+    self.next_stack_offset = parent_next_stack_offset;
+    self.register_manager.free_registers = parent_free_registers;
+
+    try self.performReloc(reloc);
+    const else_branch = self.branch_stack.addOneAssumeCapacity();
+    else_branch.* = .{};
+
+    try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
+    for (liveness_condbr.else_deaths) |operand| {
+        self.processDeath(operand);
+    }
+    try self.genBody(else_body);
+
+    // At this point, each branch will possibly have conflicting values for where
+    // each instruction is stored. They agree, however, on which instructions are alive/dead.
+    // We use the first ("then") branch as canonical, and here emit
+    // instructions into the second ("else") branch to make it conform.
+    // We continue to respect the data structure semantic guarantees of the else_branch so
+    // that we can use all the code emitting abstractions. This is why at the bottom we
+    // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
+    // rather than assigning it.
+    const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2];
+    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count());
+
+    const else_slice = else_branch.inst_table.entries.slice();
+    const else_keys = else_slice.items(.key);
+    const else_values = else_slice.items(.value);
+    for (else_keys) |else_key, else_idx| {
+        const else_value = else_values[else_idx];
+        const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
+            // The instruction's MCValue is overridden in both branches.
+            parent_branch.inst_table.putAssumeCapacity(else_key, then_entry.value);
+            if (else_value == .dead) {
+                assert(then_entry.value == .dead);
+                continue;
+            }
+            break :blk then_entry.value;
+        } else blk: {
+            if (else_value == .dead)
+                continue;
+            // The instruction is only overridden in the else branch.
+            var i: usize = self.branch_stack.items.len - 2;
+            while (true) {
+                i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
+                if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| {
+                    assert(mcv != .dead);
+                    break :blk mcv;
+                }
+            }
+        };
+        log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
+        // TODO make sure the destination stack offset / register does not already have something
+        // going on there.
+        try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value);
+        // TODO track the new register / stack allocation
+    }
+    try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
+    const then_slice = saved_then_branch.inst_table.entries.slice();
+    const then_keys = then_slice.items(.key);
+    const then_values = then_slice.items(.value);
+    for (then_keys) |then_key, then_idx| {
+        const then_value = then_values[then_idx];
+        // We already deleted the items from this table that matched the else_branch.
+        // So these are all instructions that are only overridden in the then branch.
+        parent_branch.inst_table.putAssumeCapacity(then_key, then_value);
+        if (then_value == .dead)
+            continue;
+        const parent_mcv = blk: {
+            var i: usize = self.branch_stack.items.len - 2;
+            while (true) {
+                i -= 1;
+                if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| {
+                    assert(mcv != .dead);
+                    break :blk mcv;
+                }
+            }
+        };
+        log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
+        // TODO make sure the destination stack offset / register does not already have something
+        // going on there.
+        try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value);
+        // TODO track the new register / stack allocation
+    }
+
+    {
+        // deinit takes a mutable pointer, so pop into a local first.
+        var else_b = self.branch_stack.pop();
+        else_b.deinit(self.gpa);
+    }
+
+    return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none });
 }
 
 fn isNull(self: *Self, operand: MCValue) !MCValue {
@@ -1927,10 +2061,12 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
     return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch});
 }
 
-fn performReloc(self: *Self, reloc: Reloc) !void {
-    switch (reloc) {
-        .rel32 => return self.fail("TODO reloc.rel32 for {}", .{self.target.cpu.arch}),
-        .arm_branch => return self.fail("TODO reloc.arm_branch for {}", .{self.target.cpu.arch}),
+fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
+    const tag = self.mir_instructions.items(.tag)[inst];
+    switch (tag) {
+        .b_cond => self.mir_instructions.items(.data)[inst].inst_cond.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+        .b => self.mir_instructions.items(.data)[inst].inst = @intCast(Mir.Inst.Index, self.mir_instructions.len),
+        else => unreachable,
     }
 }
 
@@ -1970,7 +2106,10 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
     // Emit a jump with a relocation. It will be patched up after the block ends.
     try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
 
-    return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch});
+    block_data.relocs.appendAssumeCapacity(try self.addInst(.{
+        .tag = .b,
+        .data = .{ .inst = undefined }, // populated later through performReloc
+    }));
 }
 
 fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
@@ -2117,8 +2256,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro
             return self.fail("TODO implement set stack variable from embedded_in_code", .{});
         },
         .register => |reg| {
-            _ = reg;
-
             const abi_size = ty.abiSize(self.target.*);
             const adj_off = stack_offset + abi_size;
 
src/arch/aarch64/Emit.zig
@@ -14,6 +14,7 @@ const DW = std.dwarf;
 const leb128 = std.leb;
 const Instruction = bits.Instruction;
 const Register = bits.Register;
+const log = std.log.scoped(.aarch64_emit);
 const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
 
 mir: Mir,
@@ -47,9 +48,16 @@ const InnerError = error{
 };
 
 const BranchType = enum {
+    b_cond,
     unconditional_branch_immediate,
 
-    const default = BranchType.unconditional_branch_immediate;
+    fn default(tag: Mir.Inst.Tag) BranchType {
+        return switch (tag) {
+            .b, .bl => .unconditional_branch_immediate,
+            .b_cond => .b_cond,
+            else => unreachable,
+        };
+    }
 };
 
 pub fn emitMir(
@@ -68,6 +76,8 @@ pub fn emitMir(
             .cmp_immediate => try emit.mirAddSubtractImmediate(inst),
             .sub_immediate => try emit.mirAddSubtractImmediate(inst),
 
+            .b_cond => try emit.mirConditionalBranchImmediate(inst),
+
             .b => try emit.mirBranch(inst),
             .bl => try emit.mirBranch(inst),
 
@@ -112,29 +122,50 @@ pub fn emitMir(
 }
 
 pub fn deinit(emit: *Emit) void {
+    var iter = emit.branch_forward_origins.valueIterator();
+    while (iter.next()) |origin_list| {
+        origin_list.deinit(emit.bin_file.allocator);
+    }
+
     emit.branch_types.deinit(emit.bin_file.allocator);
     emit.branch_forward_origins.deinit(emit.bin_file.allocator);
     emit.code_offset_mapping.deinit(emit.bin_file.allocator);
     emit.* = undefined;
 }
 
-fn optimalBranchType(emit: *Emit, offset: i64) !BranchType {
+fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
     assert(offset & 0b11 == 0);
 
-    // TODO handle conditional branches
-    if (std.math.cast(i26, offset >> 2)) |_| {
-        return BranchType.unconditional_branch_immediate;
-    } else |_| {
-        return emit.fail("TODO support branches larger than +-128 MiB", .{});
+    switch (tag) {
+        .b, .bl => {
+            if (std.math.cast(i26, offset >> 2)) |_| {
+                return BranchType.unconditional_branch_immediate;
+            } else |_| {
+                return emit.fail("TODO support branches larger than +-128 MiB", .{});
+            }
+        },
+        .b_cond => {
+            if (std.math.cast(i19, offset >> 2)) |_| {
+                return BranchType.b_cond;
+            } else |_| {
+                return emit.fail("TODO support conditional branches larger than +-1 MiB", .{});
+            }
+        },
+        else => unreachable,
     }
 }
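The ranges follow directly from the A64 encodings: b/bl carry a signed 26-bit word offset and b.cond a signed 19-bit word offset, and word offsets are byte offsets shifted right by two (hence the alignment assert above):

    // b/bl:   imm26 words => byte range [-(1 << 27), (1 << 27) - 4], i.e. +-128 MiB
    // b.cond: imm19 words => byte range [-(1 << 20), (1 << 20) - 4], i.e. +-1 MiB
    const max_b_bytes: i64 = (1 << 27) - 4;
    const max_b_cond_bytes: i64 = (1 << 20) - 4;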
 
 fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
     const tag = emit.mir.instructions.items(.tag)[inst];
-    switch (tag) {
-        .b, .bl => switch (emit.branch_types.get(inst).?) {
+
+    if (isBranch(tag)) {
+        switch (emit.branch_types.get(inst).?) {
             .unconditional_branch_immediate => return 4,
-        },
+            .b_cond => return 4,
+        }
+    }
+
+    switch (tag) {
         .load_memory => {
             if (emit.bin_file.options.pie) {
                 // adrp, ldr
@@ -151,10 +182,32 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
                 return 5 * 4;
             }
         },
+        .call_extern => return 4,
+        .dbg_line,
+        .dbg_epilogue_begin,
+        .dbg_prologue_end,
+        => return 0,
         else => return 4,
     }
 }
 
+fn isBranch(tag: Mir.Inst.Tag) bool {
+    return switch (tag) {
+        .b, .bl, .b_cond => true,
+        else => false,
+    };
+}
+
+fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+
+    switch (tag) {
+        .b, .bl => return emit.mir.instructions.items(.data)[inst].inst,
+        .b_cond => return emit.mir.instructions.items(.data)[inst].inst_cond.inst,
+        else => unreachable,
+    }
+}
+
 fn lowerBranches(emit: *Emit) !void {
     const mir_tags = emit.mir.instructions.items(.tag);
     const allocator = emit.bin_file.allocator;
@@ -167,41 +220,38 @@ fn lowerBranches(emit: *Emit) !void {
     // generating MIR
     for (mir_tags) |tag, index| {
         const inst = @intCast(u32, index);
-        switch (tag) {
-            .b, .bl => {
-                const target_inst = emit.mir.instructions.items(.data)[inst].inst;
-
-                // Remember this branch instruction
-                try emit.branch_types.put(allocator, inst, BranchType.default);
-
-                // Forward branches require some extra stuff: We only
-                // know their offset once we arrive at the target
-                // instruction. Therefore, we need to be able to
-                // access the branch instruction when we visit the
-                // target instruction in order to manipulate its type
-                // etc.
-                if (target_inst > inst) {
-                    // Remember the branch instruction index
-                    try emit.code_offset_mapping.put(allocator, inst, 0);
-
-                    if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
-                        try origin_list.append(allocator, inst);
-                    } else {
-                        var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{};
-                        try origin_list.append(allocator, inst);
-                        try emit.branch_forward_origins.put(allocator, target_inst, origin_list);
-                    }
+        if (isBranch(tag)) {
+            const target_inst = emit.branchTarget(inst);
+
+            // Remember this branch instruction
+            try emit.branch_types.put(allocator, inst, BranchType.default(tag));
+
+            // Forward branches require some extra stuff: We only
+            // know their offset once we arrive at the target
+            // instruction. Therefore, we need to be able to
+            // access the branch instruction when we visit the
+            // target instruction in order to manipulate its type
+            // etc.
+            if (target_inst > inst) {
+                // Remember the branch instruction index
+                try emit.code_offset_mapping.put(allocator, inst, 0);
+
+                if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
+                    try origin_list.append(allocator, inst);
+                } else {
+                    var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .{};
+                    try origin_list.append(allocator, inst);
+                    try emit.branch_forward_origins.put(allocator, target_inst, origin_list);
                 }
+            }
 
-                // Remember the target instruction index so that we
-                // update the real code offset in all future passes
-                //
-                // putNoClobber may not be used as the put operation
-                // may clobber the entry when multiple branches branch
-                // to the same target instruction
-                try emit.code_offset_mapping.put(allocator, target_inst, 0);
-            },
-            else => {}, // not a branch
+            // Remember the target instruction index so that we
+            // update the real code offset in all future passes
+            //
+            // putNoClobber may not be used as the put operation
+            // may clobber the entry when multiple branches branch
+            // to the same target instruction
+            try emit.code_offset_mapping.put(allocator, target_inst, 0);
         }
     }
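lowerBranches is a fixed-point computation: instruction sizes depend on branch types, branch types depend on offsets, and offsets depend on sizes. The loop in the next hunk therefore recomputes everything until a full pass changes nothing. In outline:

    // repeat until stable:
    //     walk all instructions, accumulating code offsets via instructionSize()
    //     for every branch whose target offset is known on this pass,
    //     recompute the optimal branch type; any change invalidates the sizes,
    //     so clear all_branches_lowered and go around again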
 
@@ -225,21 +275,20 @@ fn lowerBranches(emit: *Emit) !void {
 
             // If this instruction is a backward branch, calculate the
             // offset, which may potentially update the branch type
-            switch (tag) {
-                .b, .bl => {
-                    const target_inst = emit.mir.instructions.items(.data)[inst].inst;
-                    if (target_inst < inst) {
-                        const target_offset = emit.code_offset_mapping.get(target_inst).?;
-                        const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset + 8);
-                        const branch_type = emit.branch_types.getPtr(inst).?;
-                        const optimal_branch_type = try emit.optimalBranchType(offset);
-                        if (branch_type.* != optimal_branch_type) {
-                            branch_type.* = optimal_branch_type;
-                            all_branches_lowered = false;
-                        }
+            if (isBranch(tag)) {
+                const target_inst = emit.branchTarget(inst);
+                if (target_inst < inst) {
+                    const target_offset = emit.code_offset_mapping.get(target_inst).?;
+                    const offset = @intCast(i64, target_offset) - @intCast(i64, current_code_offset);
+                    const branch_type = emit.branch_types.getPtr(inst).?;
+                    const optimal_branch_type = try emit.optimalBranchType(tag, offset);
+                    if (branch_type.* != optimal_branch_type) {
+                        branch_type.* = optimal_branch_type;
+                        all_branches_lowered = false;
                     }
-                },
-                else => {},
+
+                    log.debug("lowerBranches: branch {} has offset {}", .{ inst, offset });
+                }
             }
 
             // If this instruction is the target of one or more
@@ -247,14 +296,17 @@ fn lowerBranches(emit: *Emit) !void {
             // potentially update the branch type
             if (emit.branch_forward_origins.get(inst)) |origin_list| {
                 for (origin_list.items) |forward_branch_inst| {
+                    const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
                     const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
-                    const offset = @intCast(i64, forward_branch_inst_offset) - @intCast(i64, current_code_offset + 8);
+                    const offset = @intCast(i64, current_code_offset) - @intCast(i64, forward_branch_inst_offset);
                     const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
-                    const optimal_branch_type = try emit.optimalBranchType(offset);
+                    const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
                     if (branch_type.* != optimal_branch_type) {
                         branch_type.* = optimal_branch_type;
                         all_branches_lowered = false;
                     }
+
+                    log.debug("lowerBranches: branch {} has offset {}", .{ forward_branch_inst, offset });
                 }
             }
 
@@ -368,12 +420,37 @@ fn mirAddSubtractImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
     }
 }
 
+fn mirConditionalBranchImmediate(emit: *Emit, inst: Mir.Inst.Index) !void {
+    const tag = emit.mir.instructions.items(.tag)[inst];
+    const inst_cond = emit.mir.instructions.items(.data)[inst].inst_cond;
+
+    const offset = @intCast(i64, emit.code_offset_mapping.get(inst_cond.inst).?) - @intCast(i64, emit.code.items.len);
+    const branch_type = emit.branch_types.get(inst).?;
+    log.debug("mirConditionalBranchImmediate: {} offset={}", .{ inst, offset });
+
+    switch (branch_type) {
+        .b_cond => switch (tag) {
+            .b_cond => try emit.writeInstruction(Instruction.bCond(inst_cond.cond, @intCast(i21, offset))),
+            else => unreachable,
+        },
+        else => unreachable,
+    }
+}
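The i21 cast is the byte-offset counterpart of the 19-bit word immediate: imm19 << 2 spans 21 bits, with the low two bits always zero (optimalBranchType asserts this alignment):

    // byte offset (i21) = imm19 (word offset) << 2; low two bits must be 0
    //     offset = 16  ->  imm19 = 16 >> 2 = 4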
+
 fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
     const tag = emit.mir.instructions.items(.tag)[inst];
     const target_inst = emit.mir.instructions.items(.data)[inst].inst;
 
-    const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len + 8);
+    log.debug("branch {}(tag: {}) -> {}(tag: {})", .{
+        inst,
+        tag,
+        target_inst,
+        emit.mir.instructions.items(.tag)[target_inst],
+    });
+
+    const offset = @intCast(i64, emit.code_offset_mapping.get(target_inst).?) - @intCast(i64, emit.code.items.len);
     const branch_type = emit.branch_types.get(inst).?;
+    log.debug("mirBranch: {} offset={}", .{ inst, offset });
 
     switch (branch_type) {
         .unconditional_branch_immediate => switch (tag) {
@@ -381,6 +458,7 @@ fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
             .bl => try emit.writeInstruction(Instruction.bl(@intCast(i28, offset))),
             else => unreachable,
         },
+        else => unreachable,
     }
 }
 
src/arch/aarch64/Mir.zig
@@ -26,6 +26,8 @@ pub const Inst = struct {
     pub const Tag = enum(u16) {
         /// Add (immediate)
         add_immediate,
+        /// Branch conditionally
+        b_cond,
         /// Branch
         b,
         /// Branch with Link
@@ -48,7 +50,7 @@ pub const Inst = struct {
         dbg_epilogue_begin,
         /// Pseudo-instruction: Update debug line
         dbg_line,
-        /// Psuedo-instruction: Load memory
+        /// Pseudo-instruction: Load memory
         ///
         /// Payload is `LoadMemory`
         load_memory,
@@ -103,7 +105,7 @@ pub const Inst = struct {
         ///
         /// Used by e.g. nop
         nop: void,
-        /// Another instruction.
+        /// Another instruction
         ///
         /// Used by e.g. b
         inst: Index,
@@ -123,6 +125,13 @@ pub const Inst = struct {
         ///
         /// Used by e.g. blr
         reg: Register,
+        /// Another instruction and a condition
+        ///
+        /// Used by e.g. b_cond
+        inst_cond: struct {
+            inst: Index,
+            cond: bits.Instruction.Condition,
+        },
         /// A register, an unsigned 16-bit immediate, and an optional shift
         ///
         /// Used by e.g. movz
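Putting the new tag and payload together, a conditional branch is created like this (mirroring the airCondBr call site in CodeGen.zig above):

    _ = try self.addInst(.{
        .tag = .b_cond,
        .data = .{ .inst_cond = .{
            .inst = undefined, // patched later via performReloc
            .cond = .ne, // any bits.Instruction.Condition
        } },
    });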
test/stage2/aarch64.zig
@@ -68,4 +68,33 @@ pub fn addCases(ctx: *TestContext) !void {
             "",
         );
     }
+
+    {
+        var case = ctx.exe("conditional branches", linux_aarch64);
+
+        case.addCompareOutput(
+            \\pub fn main() void {
+            \\    foo(123);
+            \\}
+            \\
+            \\fn foo(x: u64) void {
+            \\    if (x > 42) {
+            \\        print();
+            \\    }
+            \\}
+            \\
+            \\fn print() void {
+            \\    asm volatile ("svc #0"
+            \\        :
+            \\        : [number] "{x8}" (64),
+            \\          [arg1] "{x0}" (1),
+            \\          [arg2] "{x1}" (@ptrToInt("Hello, World!\n")),
+            \\          [arg3] "{x2}" ("Hello, World!\n".len),
+            \\        : "memory", "cc"
+            \\    );
+            \\}
+        ,
+            "Hello, World!\n",
+        );
+    }
 }
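The print helper invokes the Linux write syscall (number 64 on AArch64) directly via svc #0, so the test passes exactly when the conditional branch is taken correctly. A plausible lowering of foo (illustrative; register choices depend on the allocator, and frame setup is omitted):

    //     cmp   x0, #42
    //     b.ls  after        // `>` on unsigned operands, negated to ls
    //     bl    print
    // after:
    //     ret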