Commit decff51238
Changed files (2)
src/codegen/spirv/Section.zig
@@ -65,6 +65,16 @@ pub fn emit(
section.writeOperands(opcode.Operands(), operands);
}
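+/// Convenience wrapper around `emit` for the common case of emitting
+/// an unconditional `OpBranch` to `target_label`.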
+pub fn emitBranch(
+ section: *Section,
+ allocator: Allocator,
+ target_label: spec.IdRef,
+) !void {
+ try section.emit(allocator, .OpBranch, .{
+ .target_label = target_label,
+ });
+}
+
pub fn emitSpecConstantOp(
section: *Section,
allocator: Allocator,
src/codegen/spirv.zig
@@ -40,17 +40,109 @@ const SpvTypeInfo = struct {
const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, SpvTypeInfo);
-const IncomingBlock = struct {
- src_label_id: IdRef,
- break_value_id: IdRef,
-};
+const ControlFlow = union(enum) {
+ const Structured = struct {
+ /// This type indicates the way that a block is terminated. The
+ /// state of a particular block is used to track how a jump from
+ /// inside the block must reach the outside.
+ const Block = union(enum) {
+ const Incoming = struct {
+ src_label: IdRef,
+ /// Instruction that yields a u32 value of the
+ /// `Air.Inst.Index` that control flow should jump to.
+ next_block: IdRef,
+ };
-const Block = struct {
- label_id: ?IdRef,
- incoming_blocks: std.ArrayListUnmanaged(IncomingBlock),
-};
+ const SelectionMerge = struct {
+ /// Incoming block from the `then` label.
+ /// Note that the incoming block from the `else` label is
+ /// given by the next element in the stack.
+ incoming: Incoming,
+ /// The label id of the cond_br's merge block.
+ /// For the top-most element in the stack, this
+ /// value is undefined.
+ merge_block: IdRef,
+ };
+
+ /// For a `selection` type block, we cannot use early exits, and we
+ /// must generate a 'merge ladder' of OpSelectionMerge instructions. To that end,
+ /// we keep a stack of the merges that still must be closed at the end of
+ /// a block.
+ ///
+ /// This entire structure resembles a tree like
+ /// a x
+ /// \ /
+ /// b o merge
+ /// \ /
+ /// c o merge
+ /// \ /
+ /// o merge
+ /// /
+ /// o jump to next block
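+ ///
+ /// Each `o` above is a merge block (a sketch): it selects, via an OpPhi,
+ /// the block-id that control flow should continue with, and either
+ /// dispatches to the next merge in the ladder or jumps to the next block.
+ /// See `structuredNextBlock` and `genStructuredBody` for the actual ladder generation.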
+ selection: struct {
+ /// In order to know which merges we still need to do, we need to keep
+ /// a stack of those.
+ merge_stack: std.ArrayListUnmanaged(SelectionMerge) = .{},
+ },
+ /// For a `loop` type block, we can early-exit the block by
+ /// jumping to the loop exit node, and we don't need to generate
+ /// an entire stack of merges.
+ loop: struct {
+ /// The next block to jump to can be determined from any number
+ /// of conditions that jump to the loop exit.
+ merges: std.ArrayListUnmanaged(Incoming) = .{},
+ /// The label id of the loop's merge block.
+ merge_block: IdRef,
+ },
+
+ fn deinit(self: *Structured.Block, a: Allocator) void {
+ switch (self.*) {
+ .selection => |*merge| merge.merge_stack.deinit(a),
+ .loop => |*merge| merge.merges.deinit(a),
+ }
+ self.* = undefined;
+ }
+ };
+ /// The stack of (structured) blocks that we are currently in. This determines
+ /// how exits from the current block must be handled.
+ block_stack: std.ArrayListUnmanaged(*Structured.Block) = .{},
+ /// Maps `block` inst indices to the variable that the block's result
+ /// value must be written to.
+ block_results: std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef) = .{},
+ };
-const BlockMap = std.AutoHashMapUnmanaged(Air.Inst.Index, *Block);
+ const Unstructured = struct {
+ const Incoming = struct {
+ src_label: IdRef,
+ break_value_id: IdRef,
+ };
+
+ const Block = struct {
+ label: ?IdRef = null,
+ incoming_blocks: std.ArrayListUnmanaged(Incoming) = .{},
+ };
+
+ /// We need to keep track of result ids for block labels, as well as the 'incoming'
+ /// blocks for a block.
+ blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, *Block) = .{},
+ };
+
+ structured: Structured,
+ unstructured: Unstructured,
+
+ pub fn deinit(self: *ControlFlow, a: Allocator) void {
+ switch (self.*) {
+ .structured => |*cf| {
+ cf.block_stack.deinit(a);
+ cf.block_results.deinit(a);
+ },
+ .unstructured => |*cf| {
+ cf.blocks.deinit(a);
+ },
+ }
+ self.* = undefined;
+ }
+};
/// This structure holds information that is relevant to the entire compilation,
/// in contrast to `DeclGen`, which only holds relevant information about a
@@ -106,7 +198,6 @@ pub const Object = struct {
.opencl => mod.comp.bin_file.options.want_structured_cfg orelse false,
else => true,
};
- _ = want_structured_cfg;
var decl_gen = DeclGen{
.gpa = self.gpa,
@@ -117,7 +208,11 @@ pub const Object = struct {
.air = air,
.liveness = liveness,
.type_map = &self.type_map,
- .current_block_label_id = undefined,
+ .control_flow = switch (want_structured_cfg) {
+ true => .{ .structured = .{} },
+ false => .{ .unstructured = .{} },
+ },
+ .current_block_label = undefined,
};
defer decl_gen.deinit();
@@ -222,12 +317,11 @@ const DeclGen = struct {
/// is already in this map, it's recursive.
wip_pointers: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, CacheRef) = .{},
- /// We need to keep track of result ids for block labels, as well as the 'incoming'
- /// blocks for a block.
- blocks: BlockMap = .{},
+ /// This field keeps track of the current state with respect to structured or unstructured control flow.
+ control_flow: ControlFlow,
/// The label of the SPIR-V block we are currently generating.
- current_block_label_id: IdRef,
+ current_block_label: IdRef,
/// The code (prologue and body) for the function we are currently generating code for.
func: SpvModule.Fn = .{},
@@ -309,7 +403,7 @@ const DeclGen = struct {
self.args.deinit(self.gpa);
self.inst_results.deinit(self.gpa);
self.wip_pointers.deinit(self.gpa);
- self.blocks.deinit(self.gpa);
+ self.control_flow.deinit(self.gpa);
self.func.deinit(self.gpa);
self.base_line_stack.deinit(self.gpa);
}
@@ -393,10 +487,11 @@ const DeclGen = struct {
// TODO: This should probably be made a little more robust.
const func = self.func;
defer self.func = func;
- const block_label_id = self.current_block_label_id;
- defer self.current_block_label_id = block_label_id;
+ const block_label = self.current_block_label;
+ defer self.current_block_label = block_label;
self.func = .{};
+ defer self.func.deinit(self.gpa);
// TODO: Merge this with genDecl?
const begin = self.spv.beginGlobal();
@@ -418,7 +513,7 @@ const DeclGen = struct {
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
- self.current_block_label_id = root_block_id;
+ self.current_block_label = root_block_id;
const val_id = try self.constant(ty, val.toValue(), .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
@@ -441,9 +536,9 @@ const DeclGen = struct {
/// block we are currently generating.
/// Note that there is no such thing as nested blocks like in ZIR or AIR, so we don't need to
/// keep track of the previous block.
- fn beginSpvBlock(self: *DeclGen, label_id: IdResult) !void {
- try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label_id });
- self.current_block_label_id = label_id;
+ fn beginSpvBlock(self: *DeclGen, label: IdResult) !void {
+ try self.func.body.emit(self.spv.gpa, .OpLabel, .{ .id_result = label });
+ self.current_block_label = label;
}
/// SPIR-V requires enabling specific integer sizes through capabilities, and so if they are not enabled, we need
@@ -1792,13 +1887,22 @@ const DeclGen = struct {
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
- self.current_block_label_id = root_block_id;
+ self.current_block_label = root_block_id;
const main_body = self.air.getMainBody();
- try self.genBody(main_body);
-
- // Append the actual code into the functions section.
+ switch (self.control_flow) {
+ .structured => {
+ _ = try self.genStructuredBody(.selection, main_body);
+ // All paths to this point are expected to terminate on their own, but we
+ // still need this block to act as a dummy merge block.
+ try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
+ },
+ .unstructured => {
+ try self.genBody(main_body);
+ },
+ }
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
+ // Append the actual code into the functions section.
try self.spv.addFunction(spv_decl_index, self.func);
const fqn = ip.stringToSlice(try decl.getFullyQualifiedName(self.module));
@@ -1856,7 +1960,7 @@ const DeclGen = struct {
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
.id_result = root_block_id,
});
- self.current_block_label_id = root_block_id;
+ self.current_block_label = root_block_id;
const val_id = try self.constant(decl.ty, init_val, .indirect);
try self.func.body.emit(self.spv.gpa, .OpStore, .{
@@ -3655,6 +3759,154 @@ const DeclGen = struct {
return self.args.items[self.next_arg_index];
}
+ /// Given a slice of incoming block connections, returns the block-id of the next
+ /// block to jump to. This function emits instructions, so it should be called
+ /// while generating the merge block of the block.
+ /// This function should only be called with structured control flow generation.
+ fn structuredNextBlock(self: *DeclGen, incoming: []const ControlFlow.Structured.Block.Incoming) !IdRef {
+ assert(self.control_flow == .structured);
+
+ const result_id = self.spv.allocId();
+ const block_id_ty_ref = try self.intType(.unsigned, 32);
+ try self.func.body.emitRaw(self.spv.gpa, .OpPhi, @intCast(2 + incoming.len * 2)); // result type + result + variable/parent...
+ self.func.body.writeOperand(spec.IdResultType, self.typeId(block_id_ty_ref));
+ self.func.body.writeOperand(spec.IdRef, result_id);
+
+ for (incoming) |incoming_block| {
+ self.func.body.writeOperand(spec.PairIdRefIdRef, .{ incoming_block.next_block, incoming_block.src_label });
+ }
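+
+ // The emitted instruction has the form (a sketch, with hypothetical ids):
+ // %result = OpPhi %u32 %next_block_0 %src_label_0 %next_block_1 %src_label_1 ...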
+
+ return result_id;
+ }
+
+ /// Jumps to the block with the target block-id. This function must only be called when
+ /// terminating a body; no instructions should be emitted after it.
+ /// This function should only be called with structured control flow generation.
+ fn structuredBreak(self: *DeclGen, target_block: IdRef) !void {
+ assert(self.control_flow == .structured);
+
+ const sblock = self.control_flow.structured.block_stack.getLast();
+ const merge_block = switch (sblock.*) {
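+ // For a selection block: allocate a fresh merge label, record the
+ // (source label, target block-id) pair on its merge stack, and branch
+ // to that merge label; the ladder is closed later in genStructuredBody().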
+ .selection => |*merge| blk: {
+ const merge_label = self.spv.allocId();
+ try merge.merge_stack.append(self.gpa, .{
+ .incoming = .{
+ .src_label = self.current_block_label,
+ .next_block = target_block,
+ },
+ .merge_block = merge_label,
+ });
+ break :blk merge_label;
+ },
+ // Loop blocks do not end in a break: neither through a direct break,
+ // nor through another instruction like cond_br or unreachable (those
+ // situations are replaced by `cond_br` in Sema, or a `block` instruction
+ // is placed around them).
+ .loop => unreachable,
+ };
+
+ try self.func.body.emitBranch(self.spv.gpa, merge_block);
+ }
+
+ /// Generate a body in a way that exits the body using only structured constructs.
+ /// Returns the block-id of the next block to jump to. After this function, a jump
+ /// should still be emitted to the block that should follow this structured body.
+ /// This function should only be called with structured control flow generation.
+ fn genStructuredBody(
+ self: *DeclGen,
+ /// This parameter defines how this structured body is exited.
+ block_merge_type: union(enum) {
+ /// Using selection; early exits from this body are surrounded with
+ /// if() statements.
+ selection,
+ /// Using loops; loops can be early exited by jumping to the merge block at
+ /// any time.
+ loop: struct {
+ merge_label: IdRef,
+ continue_label: IdRef,
+ },
+ },
+ body: []const Air.Inst.Index,
+ ) !IdRef {
+ assert(self.control_flow == .structured);
+
+ var sblock: ControlFlow.Structured.Block = switch (block_merge_type) {
+ .loop => |merge| .{ .loop = .{
+ .merge_block = merge.merge_label,
+ } },
+ .selection => .{ .selection = .{} },
+ };
+ defer sblock.deinit(self.gpa);
+
+ {
+ try self.control_flow.structured.block_stack.append(self.gpa, &sblock);
+ defer _ = self.control_flow.structured.block_stack.pop();
+
+ try self.genBody(body);
+ }
+
+ switch (sblock) {
+ .selection => |merge| {
+ // Now generate the merge block for all merges that
+ // still need to be performed.
+ const merge_stack = merge.merge_stack.items;
+
+ // If there are no merges on the stack, this block didn't generate any jumps (all paths
+ // ended with a return or an unreachable). In that case, we don't need to do
+ // any merging.
+ if (merge_stack.len == 0) {
+ // We still need to return the block-id of a next block to jump to.
+ // For example, if we have code like
+ // if (x) {
+ // if (y) return else return;
+ // } else {}
+ // then we still need the outer block to have an OpSelectionMerge and consequently
+ // a phi node. In that case we can just return a bogus value, since we know that
+ // its path will never be taken.
+
+ // Make sure that we are still in a block when exiting the function.
+ // TODO: Can we get rid of that?
+ try self.beginSpvBlock(self.spv.allocId());
+ const block_id_ty_ref = try self.intType(.unsigned, 32);
+ return try self.spv.constUndef(block_id_ty_ref);
+ }
+
+ // The top-most merge actually only has a single source: the
+ // final jump of the block, or the merge block of a sub-block, cond_br,
+ // or loop. Therefore we just need to generate a block with a jump to the
+ // next merge block.
+ try self.beginSpvBlock(merge_stack[merge_stack.len - 1].merge_block);
+
+ // Now generate a merge ladder for the remaining merges in the stack.
+ var incoming = ControlFlow.Structured.Block.Incoming{
+ .src_label = self.current_block_label,
+ .next_block = merge_stack[merge_stack.len - 1].incoming.next_block,
+ };
+ var i = merge_stack.len - 1;
+ while (i > 0) {
+ i -= 1;
+ const step = merge_stack[i];
+ try self.func.body.emitBranch(self.spv.gpa, step.merge_block);
+ try self.beginSpvBlock(step.merge_block);
+ const next_block = try self.structuredNextBlock(&.{ incoming, step.incoming });
+ incoming = .{
+ .src_label = step.merge_block,
+ .next_block = next_block,
+ };
+ }
+
+ return incoming.next_block;
+ },
+ .loop => |merge| {
+ // Close the loop by jumping to the continue label
+ try self.func.body.emitBranch(self.spv.gpa, block_merge_type.loop.continue_label);
+ // In the merge block, we must simply merge all the incoming blocks to get the next block.
+ try self.beginSpvBlock(merge.merge_block);
+ return try self.structuredNextBlock(merge.merges.items);
+ },
+ }
+ }
+
fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
// In AIR, a block doesn't really define an entry point like a SPIR-V block does, but
// more like a scope that breaks can jump out of and "return" a value from.
@@ -3670,62 +3922,170 @@ const DeclGen = struct {
const body = self.air.extra[extra.end..][0..extra.data.body_len];
const have_block_result = ty.isFnOrHasRuntimeBitsIgnoreComptime(mod);
- // 4 chosen as arbitrary initial capacity.
- var block = Block{
- // Label id is lazily allocated if needed.
- .label_id = null,
- .incoming_blocks = try std.ArrayListUnmanaged(IncomingBlock).initCapacity(self.gpa, 4),
+ const cf = switch (self.control_flow) {
+ .structured => |*cf| cf,
+ .unstructured => |*cf| {
+ var block = ControlFlow.Unstructured.Block{};
+ defer block.incoming_blocks.deinit(self.gpa);
+
+ // 4 chosen as arbitrary initial capacity.
+ try block.incoming_blocks.ensureUnusedCapacity(self.gpa, 4);
+
+ try cf.blocks.putNoClobber(self.gpa, inst, &block);
+ defer assert(cf.blocks.remove(inst));
+
+ try self.genBody(body);
+
+ // Only begin a new block if there were actually any breaks towards it.
+ if (block.label) |label| {
+ try self.beginSpvBlock(label);
+ }
+
+ if (!have_block_result)
+ return null;
+
+ assert(block.label != null);
+ const result_id = self.spv.allocId();
+ const result_type_id = try self.resolveTypeId(ty);
+
+ try self.func.body.emitRaw(
+ self.spv.gpa,
+ .OpPhi,
+ // result type + result + variable/parent...
+ 2 + @as(u16, @intCast(block.incoming_blocks.items.len * 2)),
+ );
+ self.func.body.writeOperand(spec.IdResultType, result_type_id);
+ self.func.body.writeOperand(spec.IdRef, result_id);
+
+ for (block.incoming_blocks.items) |incoming| {
+ self.func.body.writeOperand(
+ spec.PairIdRefIdRef,
+ .{ incoming.break_value_id, incoming.src_label },
+ );
+ }
+
+ return result_id;
+ },
};
- defer block.incoming_blocks.deinit(self.gpa);
- try self.blocks.putNoClobber(self.gpa, inst, &block);
- defer assert(self.blocks.remove(inst));
+ const maybe_block_result_var_id = if (have_block_result) blk: {
+ const block_result_var_id = try self.alloc(ty, .{ .storage_class = .Function });
+ try cf.block_results.putNoClobber(self.gpa, inst, block_result_var_id);
+ break :blk block_result_var_id;
+ } else null;
+ defer if (have_block_result) assert(cf.block_results.remove(inst));
- try self.genBody(body);
+ const next_block = try self.genStructuredBody(.selection, body);
- // Only begin a new block if there were actually any breaks towards it.
- if (block.label_id) |label_id| {
- try self.beginSpvBlock(label_id);
- }
+ // When encountering a block instruction, we are always at least in the function's scope,
+ // so there is always at least one entry on the block stack.
+ assert(cf.block_stack.items.len > 0);
- if (!have_block_result)
- return null;
+ // Check if the target of the branch was this current block.
+ const block_id_ty_ref = try self.intType(.unsigned, 32);
+ const this_block = try self.constInt(block_id_ty_ref, inst);
+ const jump_to_this_block_id = self.spv.allocId();
+ const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ try self.func.body.emit(self.spv.gpa, .OpIEqual, .{
+ .id_result_type = self.typeId(bool_ty_ref),
+ .id_result = jump_to_this_block_id,
+ .operand_1 = next_block,
+ .operand_2 = this_block,
+ });
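+
+ // If the comparison holds, the break targeted this block and control
+ // continues below; otherwise the break must propagate to an enclosing block.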
- assert(block.label_id != null);
- const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(ty);
+ const sblock = cf.block_stack.getLast();
- try self.func.body.emitRaw(self.spv.gpa, .OpPhi, 2 + @as(u16, @intCast(block.incoming_blocks.items.len * 2))); // result type + result + variable/parent...
- self.func.body.writeOperand(spec.IdResultType, result_type_id);
- self.func.body.writeOperand(spec.IdRef, result_id);
+ if (ty.isNoReturn(mod)) {
+ // If this block is noreturn, this instruction is the last of a block,
+ // and we must simply jump to the block's merge unconditionally.
+ try self.structuredBreak(next_block);
+ } else {
+ switch (sblock.*) {
+ .selection => |*merge| {
+ // To jump out of a selection block, push a new entry onto its merge stack and
+ // generate a conditional branch to the new merge block and to the instructions following this block.
+ const merge_label = self.spv.allocId();
+ const then_label = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
+ .merge_block = merge_label,
+ .selection_control = .{},
+ });
+ try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
+ .condition = jump_to_this_block_id,
+ .true_label = then_label,
+ .false_label = merge_label,
+ });
+ try merge.merge_stack.append(self.gpa, .{
+ .incoming = .{
+ .src_label = self.current_block_label,
+ .next_block = next_block,
+ },
+ .merge_block = merge_label,
+ });
- for (block.incoming_blocks.items) |incoming| {
- self.func.body.writeOperand(spec.PairIdRefIdRef, .{ incoming.break_value_id, incoming.src_label_id });
+ try self.beginSpvBlock(then_label);
+ },
+ .loop => |*merge| {
+ // To jump out of a loop block, generate a conditional that exits the block
+ // to the loop merge if the target ID is not the one of this block.
+ const continue_label = self.spv.allocId();
+ try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
+ .condition = jump_to_this_block_id,
+ .true_label = continue_label,
+ .false_label = merge.merge_block,
+ });
+ try merge.merges.append(self.gpa, .{
+ .src_label = self.current_block_label,
+ .next_block = next_block,
+ });
+ try self.beginSpvBlock(continue_label);
+ },
+ }
}
- return result_id;
+ if (maybe_block_result_var_id) |block_result_var_id| {
+ return try self.load(ty, block_result_var_id, .{});
+ }
+
+ return null;
}
fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const mod = self.module;
const br = self.air.instructions.items(.data)[inst].br;
const operand_ty = self.typeOf(br.operand);
- const block = self.blocks.get(br.block_inst).?;
- const mod = self.module;
- if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
- const operand_id = try self.resolve(br.operand);
- // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
- try block.incoming_blocks.append(self.gpa, .{
- .src_label_id = self.current_block_label_id,
- .break_value_id = operand_id,
- });
- }
+ switch (self.control_flow) {
+ .structured => |*cf| {
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ const operand_id = try self.resolve(br.operand);
+ const block_result_var_id = cf.block_results.get(br.block_inst).?;
+ try self.store(operand_ty, block_result_var_id, operand_id, .{});
+ }
- if (block.label_id == null) {
- block.label_id = self.spv.allocId();
- }
+ const block_id_ty_ref = try self.intType(.unsigned, 32);
+ const next_block = try self.constInt(block_id_ty_ref, br.block_inst);
+ try self.structuredBreak(next_block);
+ },
+ .unstructured => |cf| {
+ const block = cf.blocks.get(br.block_inst).?;
+ if (operand_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
+ const operand_id = try self.resolve(br.operand);
+ // current_block_label should not be undefined here, unless there
+ // is a br or br_void in the function's body.
+ try block.incoming_blocks.append(self.gpa, .{
+ .src_label = self.current_block_label,
+ .break_value_id = operand_id,
+ });
+ }
+
+ if (block.label == null) {
+ block.label = self.spv.allocId();
+ }
- try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = block.label_id.? });
+ try self.func.body.emitBranch(self.spv.gpa, block.label.?);
+ },
+ }
}
fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void {
@@ -3735,23 +4095,104 @@ const DeclGen = struct {
const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len];
const condition_id = try self.resolve(pl_op.operand);
- // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block.
- const then_label_id = self.spv.allocId();
- const else_label_id = self.spv.allocId();
+ const then_label = self.spv.allocId();
+ const else_label = self.spv.allocId();
- // TODO: We can generate OpSelectionMerge here if we know the target block that both of these will resolve to,
- // but i don't know if those will always resolve to the same block.
+ switch (self.control_flow) {
+ .structured => {
+ const merge_label = self.spv.allocId();
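+
+ // Sketch of the structure emitted below (hypothetical label names):
+ // OpSelectionMerge %merge
+ // OpBranchConditional %cond %then %else
+ // %then: <then body> ... OpBranch %merge
+ // %else: <else body> ... OpBranch %merge
+ // %merge: %next = OpPhi ...; propagated outward via structuredBreak()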
- try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
- .condition = condition_id,
- .true_label = then_label_id,
- .false_label = else_label_id,
- });
+ try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
+ .merge_block = merge_label,
+ .selection_control = .{},
+ });
+ try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
+ .condition = condition_id,
+ .true_label = then_label,
+ .false_label = else_label,
+ });
+
+ try self.beginSpvBlock(then_label);
+ const then_next = try self.genStructuredBody(.selection, then_body);
+ const then_incoming = ControlFlow.Structured.Block.Incoming{
+ .src_label = self.current_block_label,
+ .next_block = then_next,
+ };
+ try self.func.body.emitBranch(self.spv.gpa, merge_label);
+
+ try self.beginSpvBlock(else_label);
+ const else_next = try self.genStructuredBody(.selection, else_body);
+ const else_incoming = ControlFlow.Structured.Block.Incoming{
+ .src_label = self.current_block_label,
+ .next_block = else_next,
+ };
+ try self.func.body.emitBranch(self.spv.gpa, merge_label);
+
+ try self.beginSpvBlock(merge_label);
+ const next_block = try self.structuredNextBlock(&.{ then_incoming, else_incoming });
+
+ try self.structuredBreak(next_block);
+ },
+ .unstructured => {
+ try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
+ .condition = condition_id,
+ .true_label = then_label,
+ .false_label = else_label,
+ });
+
+ try self.beginSpvBlock(then_label);
+ try self.genBody(then_body);
+ try self.beginSpvBlock(else_label);
+ try self.genBody(else_body);
+ },
+ }
+ }
- try self.beginSpvBlock(then_label_id);
- try self.genBody(then_body);
- try self.beginSpvBlock(else_label_id);
- try self.genBody(else_body);
+ fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
+ const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
+ const loop = self.air.extraData(Air.Block, ty_pl.payload);
+ const body = self.air.extra[loop.end..][0..loop.data.body_len];
+
+ const body_label = self.spv.allocId();
+
+ switch (self.control_flow) {
+ .structured => {
+ const header_label = self.spv.allocId();
+ const merge_label = self.spv.allocId();
+ const continue_label = self.spv.allocId();
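+
+ // Sketch of the block structure emitted below (hypothetical label names):
+ // %header: OpLoopMerge %merge %continue
+ // OpBranch %body
+ // %body: <loop body>; structured exits eventually reach %merge
+ // %merge: %next = OpPhi ...; break out of the enclosing block
+ // %continue: OpBranch %header (the back-edge)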
+
+ // The back-edge must point to the loop header, so generate a separate block for
+ // the loop header so that we don't accidentally include instructions from the
+ // current block in the loop.
+ try self.func.body.emitBranch(self.spv.gpa, header_label);
+ try self.beginSpvBlock(header_label);
+
+ // Emit loop header and jump to loop body
+ try self.func.body.emit(self.spv.gpa, .OpLoopMerge, .{
+ .merge_block = merge_label,
+ .continue_target = continue_label,
+ .loop_control = .{},
+ });
+ try self.func.body.emitBranch(self.spv.gpa, body_label);
+
+ try self.beginSpvBlock(body_label);
+
+ const next_block = try self.genStructuredBody(.{ .loop = .{
+ .merge_label = merge_label,
+ .continue_label = continue_label,
+ } }, body);
+ try self.structuredBreak(next_block);
+
+ try self.beginSpvBlock(continue_label);
+ try self.func.body.emitBranch(self.spv.gpa, header_label);
+ },
+ .unstructured => {
+ try self.func.body.emitBranch(self.spv.gpa, body_label);
+ try self.beginSpvBlock(body_label);
+ try self.genBody(body);
+ try self.func.body.emitBranch(self.spv.gpa, body_label);
+ },
+ }
}
fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -3775,22 +4216,6 @@ const DeclGen = struct {
try self.store(elem_ty, ptr, value, .{ .is_volatile = ptr_ty.isVolatilePtr(self.module) });
}
- fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void {
- const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
- const loop = self.air.extraData(Air.Block, ty_pl.payload);
- const body = self.air.extra[loop.end..][0..loop.data.body_len];
- const loop_label_id = self.spv.allocId();
-
- // Jump to the loop entry point
- try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id });
-
- // TODO: Look into OpLoopMerge.
- try self.beginSpvBlock(loop_label_id);
- try self.genBody(body);
-
- try self.func.body.emit(self.spv.gpa, .OpBranch, .{ .target_label = loop_label_id });
- }
-
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
const ret_ty = self.typeOf(operand);
@@ -3879,7 +4304,20 @@ const DeclGen = struct {
const err_block = self.spv.allocId();
const ok_block = self.spv.allocId();
- // TODO: Merge block
+ switch (self.control_flow) {
+ .structured => {
+ // According to the AIR documentation, this block is guaranteed
+ // not to break, and to end in a return instruction. Thus,
+ // for structured control flow, we can just naively use
+ // the ok block as the merge block here.
+ try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
+ .merge_block = ok_block,
+ .selection_control = .{},
+ });
+ },
+ .unstructured => {},
+ }
+
try self.func.body.emit(self.spv.gpa, .OpBranchConditional, .{
.condition = is_err_id,
.true_label = err_block,
@@ -3890,7 +4328,6 @@ const DeclGen = struct {
try self.genBody(body);
try self.beginSpvBlock(ok_block);
- // Now just extract the payload, if required.
}
if (self.liveness.isUnused(inst)) {
return null;
@@ -3899,6 +4336,7 @@ const DeclGen = struct {
return null;
}
+ // Now just extract the payload, if required.
return try self.extractField(payload_ty, err_union_id, eu_layout.payloadFieldIndex());
}
@@ -4164,9 +4602,8 @@ const DeclGen = struct {
// Zig switches are grouped by condition, so we need to loop through all of them
const num_conditions = blk: {
var extra_index: usize = switch_br.end;
- var case_i: u32 = 0;
var num_conditions: u32 = 0;
- while (case_i < num_cases) : (case_i += 1) {
+ for (0..num_cases) |_| {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const case_body = self.air.extra[case.end + case.data.items_len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
@@ -4180,6 +4617,18 @@ const DeclGen = struct {
// We always need the default case - if zig has none, we will generate unreachable there.
const default = self.spv.allocId();
+ const merge_label = switch (self.control_flow) {
+ .structured => self.spv.allocId(),
+ .unstructured => null,
+ };
+
+ if (self.control_flow == .structured) {
+ try self.func.body.emit(self.spv.gpa, .OpSelectionMerge, .{
+ .merge_block = merge_label.?,
+ .selection_control = .{},
+ });
+ }
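+
+ // Note (a sketch of the structured layout): SPIR-V requires OpSelectionMerge
+ // to immediately precede the block's terminator, here the OpSwitch emitted
+ // below; each structured case body then branches to merge_label, where
+ // structuredNextBlock() selects the next block-id with an OpPhi.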
+
// Emit the instruction before generating the blocks.
try self.func.body.emitRaw(self.spv.gpa, .OpSwitch, 2 + (cond_words + 1) * num_conditions);
self.func.body.writeOperand(IdRef, cond_indirect);
@@ -4188,20 +4637,17 @@ const DeclGen = struct {
// Emit each of the cases
{
var extra_index: usize = switch_br.end;
- var case_i: u32 = 0;
- while (case_i < num_cases) : (case_i += 1) {
+ for (0..num_cases) |case_i| {
// SPIR-V needs a literal here, whose width depends on the case condition.
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
- const label = IdRef{ .id = first_case_label.id + case_i };
+ const label = IdRef{ .id = @intCast(first_case_label.id + case_i) };
for (items) |item| {
- const value = (try self.air.value(item, mod)) orelse {
- return self.todo("switch on runtime value???", .{});
- };
+ const value = (try self.air.value(item, mod)) orelse unreachable;
const int_val = switch (cond_ty.zigTypeTag(mod)) {
.Bool, .Int => if (cond_ty.isSignedInt(mod)) @as(u64, @bitCast(value.toSignedInt(mod))) else value.toUnsignedInt(mod),
.Enum => blk: {
@@ -4222,28 +4668,65 @@ const DeclGen = struct {
}
}
+ var incoming_structured_blocks = std.ArrayListUnmanaged(ControlFlow.Structured.Block.Incoming){};
+ defer incoming_structured_blocks.deinit(self.gpa);
+
+ if (self.control_flow == .structured) {
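+ // Reserve one entry per case plus one for the default/else body, so that
+ // appendAssumeCapacity() below cannot fail.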
+ try incoming_structured_blocks.ensureUnusedCapacity(self.gpa, num_cases + 1);
+ }
+
// Now, finally, we can start emitting each of the cases.
var extra_index: usize = switch_br.end;
- var case_i: u32 = 0;
- while (case_i < num_cases) : (case_i += 1) {
+ for (0..num_cases) |case_i| {
const case = self.air.extraData(Air.SwitchBr.Case, extra_index);
const items = @as([]const Air.Inst.Ref, @ptrCast(self.air.extra[case.end..][0..case.data.items_len]));
const case_body = self.air.extra[case.end + items.len ..][0..case.data.body_len];
extra_index = case.end + case.data.items_len + case_body.len;
- const label = IdResult{ .id = first_case_label.id + case_i };
+ const label = IdResult{ .id = @intCast(first_case_label.id + case_i) };
try self.beginSpvBlock(label);
- try self.genBody(case_body);
+
+ switch (self.control_flow) {
+ .structured => {
+ const next_block = try self.genStructuredBody(.selection, case_body);
+ incoming_structured_blocks.appendAssumeCapacity(.{
+ .src_label = self.current_block_label,
+ .next_block = next_block,
+ });
+ try self.func.body.emitBranch(self.spv.gpa, merge_label.?);
+ },
+ .unstructured => {
+ try self.genBody(case_body);
+ },
+ }
}
const else_body = self.air.extra[extra_index..][0..switch_br.data.else_body_len];
try self.beginSpvBlock(default);
if (else_body.len != 0) {
- try self.genBody(else_body);
+ switch (self.control_flow) {
+ .structured => {
+ const next_block = try self.genStructuredBody(.selection, else_body);
+ incoming_structured_blocks.appendAssumeCapacity(.{
+ .src_label = self.current_block_label,
+ .next_block = next_block,
+ });
+ try self.func.body.emitBranch(self.spv.gpa, merge_label.?);
+ },
+ .unstructured => {
+ try self.genBody(else_body);
+ },
+ }
} else {
try self.func.body.emit(self.spv.gpa, .OpUnreachable, {});
}
+
+ if (self.control_flow == .structured) {
+ try self.beginSpvBlock(merge_label.?);
+ const next_block = try self.structuredNextBlock(incoming_structured_blocks.items);
+ try self.structuredBreak(next_block);
+ }
}
fn airUnreach(self: *DeclGen) !void {