Commit 97a67762ba
Changed files (4)
src/codegen/spirv/Assembler.zig
@@ -296,18 +296,19 @@ fn processTypeInstruction(self: *Assembler) !AsmValue {
.OpTypeVoid => try self.spv.resolve(.void_type),
.OpTypeBool => try self.spv.resolve(.bool_type),
.OpTypeInt => blk: {
- const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
- 0 => .unsigned,
- 1 => .signed,
- else => {
- // TODO: Improve source location.
- return self.fail(0, "{} is not a valid signedness (expected 0 or 1)", .{operands[2].literal32});
- },
- };
- const width = std.math.cast(u16, operands[1].literal32) orelse {
- return self.fail(0, "int type of {} bits is too large", .{operands[1].literal32});
- };
- break :blk try self.spv.intType(signedness, width);
+ // const signedness: std.builtin.Signedness = switch (operands[2].literal32) {
+ // 0 => .unsigned,
+ // 1 => .signed,
+ // else => {
+ // // TODO: Improve source location.
+ // return self.fail(0, "{} is not a valid signedness (expected 0 or 1)", .{operands[2].literal32});
+ // },
+ // };
+ // const width = std.math.cast(u16, operands[1].literal32) orelse {
+ // return self.fail(0, "int type of {} bits is too large", .{operands[1].literal32});
+ // };
+ // break :blk try self.spv.intType(signedness, width);
+ break :blk @as(CacheRef, @enumFromInt(0)); // TODO(robin): fix
},
.OpTypeFloat => blk: {
const bits = operands[1].literal32;
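Note: the operand validation this hunk comments out can be kept around standalone while the CacheRef plumbing is reworked. A minimal runnable sketch preserving the removed checks (parseOpTypeInt and its error names are illustrative, not part of this commit):

const std = @import("std");

// Mirrors the commented-out checks: operand 1 is the bit width, operand 2 the
// signedness literal, which must be 0 (unsigned) or 1 (signed).
fn parseOpTypeInt(width_operand: u32, signedness_operand: u32) error{ InvalidSignedness, WidthTooLarge }!struct {
    signedness: std.builtin.Signedness,
    width: u16,
} {
    const signedness: std.builtin.Signedness = switch (signedness_operand) {
        0 => .unsigned,
        1 => .signed,
        else => return error.InvalidSignedness,
    };
    const width = std.math.cast(u16, width_operand) orelse return error.WidthTooLarge;
    return .{ .signedness = signedness, .width = width };
}

test "OpTypeInt operand validation" {
    const info = try parseOpTypeInt(32, 1);
    try std.testing.expectEqual(std.builtin.Signedness.signed, info.signedness);
    try std.testing.expectEqual(@as(u16, 32), info.width);
    try std.testing.expectError(error.InvalidSignedness, parseOpTypeInt(32, 2));
    try std.testing.expectError(error.WidthTooLarge, parseOpTypeInt(1 << 20, 0));
}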
src/codegen/spirv/Module.zig
@@ -23,7 +23,6 @@ const Section = @import("Section.zig");
const Cache = @import("Cache.zig");
pub const CacheKey = Cache.Key;
pub const CacheRef = Cache.Ref;
-pub const CacheString = Cache.String;
/// This structure represents a function that is in-progress of being emitted.
/// Commonly, the contents of this structure will be merged with the appropriate
@@ -98,7 +97,7 @@ pub const EntryPoint = struct {
/// The declaration that should be exported.
decl_index: Decl.Index,
/// The name of the kernel to be exported.
- name: CacheString,
+ name: []const u8,
/// Calling Convention
execution_model: spec.ExecutionModel,
};
@@ -106,6 +105,9 @@ pub const EntryPoint = struct {
/// A general-purpose allocator which may be used to allocate resources for this module
gpa: Allocator,
+/// Arena for things that need to live for the lifetime of this module.
+arena: std.heap.ArenaAllocator,
+
/// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
sections: struct {
/// Capability instructions
@@ -143,15 +145,26 @@ sections: struct {
/// SPIR-V instructions return result-ids. This variable holds the module-wide counter for these.
next_result_id: Word,
-/// Cache for results of OpString instructions for module file names fed to OpSource.
-/// Since OpString is pretty much only used for those, we don't need to keep track of all strings,
-/// just the ones for OpLine. Note that OpLine needs the result of OpString, and not that of OpSource.
-source_file_names: std.AutoArrayHashMapUnmanaged(CacheString, IdRef) = .{},
+/// Cache for results of OpString instructions.
+strings: std.StringArrayHashMapUnmanaged(IdRef) = .{},
/// SPIR-V type- and constant cache. This structure is used to store information about these in a more
/// efficient manner.
cache: Cache = .{},
+/// Some types must not be emitted more than once, but cannot be caught by
+/// the `intern_map` during codegen. Because IDs are sometimes compared to check
+/// whether types are the same, we can't delay until the dedup pass. Therefore,
+/// this is an ad-hoc structure to cache types where required.
+/// According to the SPIR-V specification, section 2.8, this includes all non-aggregate
+/// non-pointer types.
+cache2: struct {
+ bool_type: ?IdRef = null,
+ void_type: ?IdRef = null,
+ int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, IdRef) = .{},
+ float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, IdRef) = .{},
+} = .{},
+
/// Set of Decls, referred to by Decl.Index.
decls: std.ArrayListUnmanaged(Decl) = .{},
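The getOrPut pattern behind the `cache2` doc comment above, in isolation; a minimal runnable sketch with plain u32 ids standing in for IdRef (TypeCache and next_id are illustrative names, not part of this commit):

const std = @import("std");

// First request for a given int type allocates a fresh id and records it;
// every later request returns the same id, so result-id comparisons keep
// working for these non-aggregate types.
const TypeCache = struct {
    next_id: u32 = 1,
    int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, u32) = .{},

    fn intType(self: *TypeCache, gpa: std.mem.Allocator, signedness: std.builtin.Signedness, bits: u16) !u32 {
        const entry = try self.int_types.getOrPut(gpa, .{ .signedness = signedness, .bits = bits });
        if (!entry.found_existing) {
            entry.value_ptr.* = self.next_id; // the real code would also emit OpTypeInt here
            self.next_id += 1;
        }
        return entry.value_ptr.*;
    }
};

test "int types are deduplicated" {
    const gpa = std.testing.allocator;
    var cache = TypeCache{};
    defer cache.int_types.deinit(gpa);

    const a = try cache.intType(gpa, .signed, 32);
    const b = try cache.intType(gpa, .signed, 32);
    const c = try cache.intType(gpa, .unsigned, 32);
    try std.testing.expectEqual(a, b);
    try std.testing.expect(a != c);
}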
@@ -168,6 +181,7 @@ extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, IdRef) =
pub fn init(gpa: Allocator) Module {
return .{
.gpa = gpa,
+ .arena = std.heap.ArenaAllocator.init(gpa),
.next_result_id = 1, // 0 is an invalid SPIR-V result id, so start counting at 1.
};
}
@@ -184,15 +198,19 @@ pub fn deinit(self: *Module) void {
self.sections.types_globals_constants.deinit(self.gpa);
self.sections.functions.deinit(self.gpa);
- self.source_file_names.deinit(self.gpa);
+ self.strings.deinit(self.gpa);
self.cache.deinit(self);
+ self.cache2.int_types.deinit(self.gpa);
+ self.cache2.float_types.deinit(self.gpa);
+
self.decls.deinit(self.gpa);
self.decl_deps.deinit(self.gpa);
self.entry_points.deinit(self.gpa);
self.extended_instruction_set.deinit(self.gpa);
+ self.arena.deinit();
self.* = undefined;
}
@@ -235,10 +253,6 @@ pub fn resolveId(self: *Module, key: CacheKey) !IdResult {
return self.resultId(try self.resolve(key));
}
-pub fn resolveString(self: *Module, str: []const u8) !CacheString {
- return try self.cache.addString(self, str);
-}
-
fn addEntryPointDeps(
self: *Module,
decl_index: Decl.Index,
@@ -283,7 +297,7 @@ fn entryPoints(self: *Module) !Section {
try entry_points.emit(self.gpa, .OpEntryPoint, .{
.execution_model = entry_point.execution_model,
.entry_point = entry_point_id,
- .name = self.cache.getString(entry_point.name).?,
+ .name = entry_point.name,
.interface = interface.items,
});
}
@@ -388,51 +402,110 @@ pub fn importInstructionSet(self: *Module, set: spec.InstructionSet) !IdRef {
return result_id;
}
-/// Fetch the result-id of an OpString instruction that encodes the path of the source
-/// file of the decl. This function may also emit an OpSource with source-level information regarding
-/// the decl.
-pub fn resolveSourceFileName(self: *Module, path: []const u8) !IdRef {
- const path_ref = try self.resolveString(path);
- const result = try self.source_file_names.getOrPut(self.gpa, path_ref);
- if (!result.found_existing) {
- const file_result_id = self.allocId();
- result.value_ptr.* = file_result_id;
- try self.sections.debug_strings.emit(self.gpa, .OpString, .{
- .id_result = file_result_id,
- .string = path,
- });
+/// Fetch the result-id of an instruction corresponding to a string.
+pub fn resolveString(self: *Module, string: []const u8) !IdRef {
+ if (self.strings.get(string)) |id| {
+ return id;
}
- return result.value_ptr.*;
+ const id = self.allocId();
+ try self.strings.put(self.gpa, try self.arena.allocator().dupe(u8, string), id);
+
+ try self.sections.debug_strings.emit(self.gpa, .OpString, .{
+ .id_result = id,
+ .string = string,
+ });
+
+ return id;
}
-pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
- return try self.resolve(.{ .int_type = .{
- .signedness = signedness,
- .bits = bits,
- } });
+pub fn structType(self: *Module, types: []const IdRef, maybe_names: ?[]const []const u8) !IdRef {
+ const result_id = self.allocId();
+
+ try self.sections.types_globals_constants.emit(self.gpa, .OpTypeStruct, .{
+ .id_result = result_id,
+ .id_ref = types,
+ });
+
+ if (maybe_names) |names| {
+ assert(names.len == types.len);
+ for (names, 0..) |name, i| {
+ try self.memberDebugName(result_id, @intCast(i), name);
+ }
+ }
+
+ return result_id;
}
-pub fn vectorType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
- return try self.resolve(.{ .vector_type = .{
- .component_type = elem_ty_ref,
- .component_count = len,
- } });
+pub fn boolType(self: *Module) !IdRef {
+ if (self.cache2.bool_type) |id| return id;
+
+ const result_id = self.allocId();
+ try self.sections.types_globals_constants.emit(self.gpa, .OpTypeBool, .{
+ .id_result = result_id,
+ });
+ self.cache2.bool_type = result_id;
+ return result_id;
+}
+
+pub fn voidType(self: *Module) !IdRef {
+ if (self.cache2.void_type) |id| return id;
+
+ const result_id = self.allocId();
+ try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVoid, .{
+ .id_result = result_id,
+ });
+ self.cache2.void_type = result_id;
+ try self.debugName(result_id, "void");
+ return result_id;
}
-pub fn arrayType(self: *Module, len: u32, elem_ty_ref: CacheRef) !CacheRef {
- const len_ty_ref = try self.resolve(.{ .int_type = .{
- .signedness = .unsigned,
- .bits = 32,
- } });
- const len_ref = try self.resolve(.{ .int = .{
- .ty = len_ty_ref,
- .value = .{ .uint64 = len },
- } });
- return try self.resolve(.{ .array_type = .{
- .element_type = elem_ty_ref,
- .length = len_ref,
- } });
+pub fn intType(self: *Module, signedness: std.builtin.Signedness, bits: u16) !IdRef {
+ assert(bits > 0);
+ const entry = try self.cache2.int_types.getOrPut(self.gpa, .{ .signedness = signedness, .bits = bits });
+ if (!entry.found_existing) {
+ const result_id = self.allocId();
+ entry.value_ptr.* = result_id;
+ try self.sections.types_globals_constants.emit(self.gpa, .OpTypeInt, .{
+ .id_result = result_id,
+ .width = bits,
+ .signedness = switch (signedness) {
+ .signed => 1,
+ .unsigned => 0,
+ },
+ });
+
+ switch (signedness) {
+ .signed => try self.debugNameFmt(result_id, "i{}", .{bits}),
+ .unsigned => try self.debugNameFmt(result_id, "u{}", .{bits}),
+ }
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn floatType(self: *Module, bits: u16) !IdRef {
+ assert(bits > 0);
+ const entry = try self.cache2.float_types.getOrPut(self.gpa, .{ .bits = bits });
+ if (!entry.found_existing) {
+ const result_id = self.allocId();
+ entry.value_ptr.* = result_id;
+ try self.sections.types_globals_constants.emit(self.gpa, .OpTypeFloat, .{
+ .id_result = result_id,
+ .width = bits,
+ });
+ try self.debugNameFmt(result_id, "f{}", .{bits});
+ }
+ return entry.value_ptr.*;
+}
+
+pub fn vectorType(self: *Module, len: u32, child_id: IdRef) !IdRef {
+ const result_id = self.allocId();
+ try self.sections.types_globals_constants.emit(self.gpa, .OpTypeVector, .{
+ .id_result = result_id,
+ .component_type = child_id,
+ .component_count = len,
+ });
+ return result_id;
}
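Why resolveString dupes its key into the arena, shown standalone: the map stores whatever slice it is given, so a transient caller-owned buffer must be copied into storage that lives as long as the module. A minimal runnable sketch (StringInterner is an illustrative name, not part of this commit):

const std = @import("std");

const StringInterner = struct {
    arena: std.heap.ArenaAllocator,
    next_id: u32 = 1,
    strings: std.StringArrayHashMapUnmanaged(u32) = .{},

    fn resolve(self: *StringInterner, gpa: std.mem.Allocator, string: []const u8) !u32 {
        if (self.strings.get(string)) |id| return id;
        const id = self.next_id;
        self.next_id += 1;
        // Dupe into the arena: the caller's slice may be freed or reused
        // after this call, but the map key must stay valid.
        try self.strings.put(gpa, try self.arena.allocator().dupe(u8, string), id);
        return id;
    }
};

test "interned strings survive the caller's buffer" {
    const gpa = std.testing.allocator;
    var interner = StringInterner{ .arena = std.heap.ArenaAllocator.init(gpa) };
    defer interner.arena.deinit();
    defer interner.strings.deinit(gpa);

    var buf: [16]u8 = undefined;
    const transient = try std.fmt.bufPrint(&buf, "file_{d}.zig", .{1});
    const a = try interner.resolve(gpa, transient);
    @memset(&buf, 0); // clobber the caller's buffer
    const b = try interner.resolve(gpa, "file_1.zig");
    try std.testing.expectEqual(a, b);
}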
pub fn constUndef(self: *Module, ty_id: IdRef) !IdRef {
@@ -526,7 +599,7 @@ pub fn declareEntryPoint(
) !void {
try self.entry_points.append(self.gpa, .{
.decl_index = decl_index,
- .name = try self.resolveString(name),
+ .name = try self.arena.allocator().dupe(u8, name),
.execution_model = execution_model,
});
}
src/codegen/spirv.zig
@@ -22,8 +22,6 @@ const IdResultType = spec.IdResultType;
const StorageClass = spec.StorageClass;
const SpvModule = @import("spirv/Module.zig");
-const CacheRef = SpvModule.CacheRef;
-const CacheString = SpvModule.CacheString;
const SpvSection = @import("spirv/Section.zig");
const SpvAssembler = @import("spirv/Assembler.zig");
@@ -32,16 +30,11 @@ const InstMap = std.AutoHashMapUnmanaged(Air.Inst.Index, IdRef);
pub const zig_call_abi_ver = 3;
-/// We want to store some extra facts about types as mapped from Zig to SPIR-V.
-/// This structure is used to keep that extra information, as well as
-/// the cached reference to the type.
-const SpvTypeInfo = struct {
- ty_ref: CacheRef,
-};
-
-const TypeMap = std.AutoHashMapUnmanaged(InternPool.Index, SpvTypeInfo);
-
const InternMap = std.AutoHashMapUnmanaged(struct { InternPool.Index, DeclGen.Repr }, IdResult);
+const PtrTypeMap = std.AutoHashMapUnmanaged(
+ struct { InternPool.Index, StorageClass },
+ struct { ty_id: IdRef, fwd_emitted: bool },
+);
const ControlFlow = union(enum) {
const Structured = struct {
@@ -164,17 +157,17 @@ pub const Object = struct {
/// A map of Zig InternPool indices for anonymous decls to SPIR-V decl indices.
anon_decl_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, SpvModule.Decl.Index) = .{},
- /// A map that maps AIR intern pool indices to SPIR-V cache references (which
- /// is basically the same thing except for SPIR-V).
- /// This map is typically only used for structures that are deemed heavy enough
- /// that it is worth to store them here. The SPIR-V module also interns types,
- /// and so the main purpose of this map is to avoid recomputation and to
- /// cache extra information about the type rather than to aid in validity
- /// of the SPIR-V module.
- type_map: TypeMap = .{},
-
+ /// A map that maps AIR intern pool indices to SPIR-V result-ids.
intern_map: InternMap = .{},
+ /// This map serves a dual purpose:
+ /// - It keeps track of pointers that are currently being emitted, so that we can tell
+ /// if they are recursive and need an OpTypeForwardPointer.
+ /// - It caches pointers by child-type. This is required because sometimes we rely on
+ /// ID-equality for pointers, and pointers constructed via `ptrType()` aren't interned
+ /// via the usual `intern_map` mechanism.
+ ptr_types: PtrTypeMap = .{},
+
pub fn init(gpa: Allocator) Object {
return .{
.gpa = gpa,
@@ -186,8 +179,8 @@ pub const Object = struct {
self.spv.deinit();
self.decl_link.deinit(self.gpa);
self.anon_decl_link.deinit(self.gpa);
- self.type_map.deinit(self.gpa);
self.intern_map.deinit(self.gpa);
+ self.ptr_types.deinit(self.gpa);
}
fn genDecl(
@@ -209,8 +202,8 @@ pub const Object = struct {
.decl_index = decl_index,
.air = air,
.liveness = liveness,
- .type_map = &self.type_map,
.intern_map = &self.intern_map,
+ .ptr_types = &self.ptr_types,
.control_flow = switch (structured_cfg) {
true => .{ .structured = .{} },
false => .{ .unstructured = .{} },
@@ -315,15 +308,12 @@ const DeclGen = struct {
/// A map keeping track of which instruction generated which result-id.
inst_results: InstMap = .{},
- /// A map that maps AIR intern pool indices to SPIR-V cache references.
- /// See Object.type_map
- type_map: *TypeMap,
-
+ /// A map that maps AIR intern pool indices to SPIR-V result-ids.
+ /// See `Object.intern_map`.
intern_map: *InternMap,
- /// Child types of pointers that are currently in progress of being resolved. If a pointer
- /// is already in this map, its recursive.
- wip_pointers: std.AutoHashMapUnmanaged(struct { InternPool.Index, StorageClass }, CacheRef) = .{},
+ /// Module's pointer types, see `Object.ptr_types`.
+ ptr_types: *PtrTypeMap,
/// This field keeps track of the current state wrt structured or unstructured control flow.
control_flow: ControlFlow,
@@ -410,7 +400,6 @@ const DeclGen = struct {
pub fn deinit(self: *DeclGen) void {
self.args.deinit(self.gpa);
self.inst_results.deinit(self.gpa);
- self.wip_pointers.deinit(self.gpa);
self.control_flow.deinit(self.gpa);
self.func.deinit(self.gpa);
}
@@ -460,7 +449,7 @@ const DeclGen = struct {
const mod = self.module;
const ty = Type.fromInterned(mod.intern_pool.typeOf(val));
- const decl_ptr_ty_ref = try self.ptrType(ty, .Generic);
+ const decl_ptr_ty_id = try self.ptrType(ty, .Generic);
const spv_decl_index = blk: {
const entry = try self.object.anon_decl_link.getOrPut(self.object.gpa, .{ val, .Function });
@@ -468,7 +457,7 @@ const DeclGen = struct {
try self.addFunctionDep(entry.value_ptr.*, .Function);
const result_id = self.spv.declPtr(entry.value_ptr.*).result_id;
- return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
+ return try self.castToGeneric(decl_ptr_ty_id, result_id);
}
const spv_decl_index = try self.spv.allocDecl(.invocation_global);
@@ -496,19 +485,14 @@ const DeclGen = struct {
self.func = .{};
defer self.func.deinit(self.gpa);
- const void_ty_ref = try self.resolveType(Type.void, .direct);
- const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
- .return_type = void_ty_ref,
- .parameters = &.{},
- } });
+ const initializer_proto_ty_id = try self.functionType(Type.void, &.{});
const initializer_id = self.spv.allocId();
-
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(void_ty_ref),
+ .id_result_type = try self.resolveType(Type.void, .direct),
.id_result = initializer_id,
.function_control = .{},
- .function_type = self.typeId(initializer_proto_ty_ref),
+ .function_type = initializer_proto_ty_id,
});
const root_block_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpLabel, .{
@@ -528,9 +512,9 @@ const DeclGen = struct {
try self.spv.debugNameFmt(initializer_id, "initializer of __anon_{d}", .{@intFromEnum(val)});
- const fn_decl_ptr_ty_ref = try self.ptrType(ty, .Function);
+ const fn_decl_ptr_ty_id = try self.ptrType(ty, .Function);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(fn_decl_ptr_ty_ref),
+ .id_result_type = fn_decl_ptr_ty_id,
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -538,7 +522,7 @@ const DeclGen = struct {
});
}
- return try self.castToGeneric(self.typeId(decl_ptr_ty_ref), result_id);
+ return try self.castToGeneric(decl_ptr_ty_id, result_id);
}
fn addFunctionDep(self: *DeclGen, decl_index: SpvModule.Decl.Index, storage_class: StorageClass) !void {
@@ -712,7 +696,7 @@ const DeclGen = struct {
return try self.constInt(Type.u1, @intFromBool(value), .indirect);
},
.direct => {
- const result_ty_id = try self.resolveType2(Type.bool, .direct);
+ const result_ty_id = try self.resolveType(Type.bool, .direct);
const result_id = self.spv.allocId();
const operands = .{
.id_result_type = result_ty_id,
@@ -751,7 +735,7 @@ const DeclGen = struct {
else
bits & (@as(u64, 1) << @intCast(backing_bits)) - 1;
- const result_ty_id = try self.resolveType2(scalar_ty, repr);
+ const result_ty_id = try self.resolveType(scalar_ty, repr);
const result_id = self.spv.allocId();
const section = &self.spv.sections.types_globals_constants;
@@ -779,7 +763,7 @@ const DeclGen = struct {
defer self.gpa.free(ids);
@memset(ids, result_id);
- const vec_ty_id = try self.resolveType2(ty, repr);
+ const vec_ty_id = try self.resolveType(ty, repr);
const vec_result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpCompositeConstruct, .{
.id_result_type = vec_ty_id,
@@ -802,8 +786,8 @@ const DeclGen = struct {
// TODO: Make this OpCompositeConstruct when we can
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
for (constituents, types, 0..) |constitent_id, member_ty, index| {
- const ptr_member_ty_ref = try self.ptrType(member_ty, .Function);
- const ptr_id = try self.accessChain(ptr_member_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+ const ptr_member_ty_id = try self.ptrType(member_ty, .Function);
+ const ptr_id = try self.accessChain(ptr_member_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
@@ -824,9 +808,9 @@ const DeclGen = struct {
// TODO: Make this OpCompositeConstruct when we can
const mod = self.module;
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
- const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
+ const ptr_elem_ty_id = try self.ptrType(ty.elemType2(mod), .Function);
for (constituents, 0..) |constitent_id, index| {
- const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+ const ptr_id = try self.accessChain(ptr_elem_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
@@ -848,9 +832,9 @@ const DeclGen = struct {
// TODO: Make this OpCompositeConstruct when we can
const mod = self.module;
const ptr_composite_id = try self.alloc(ty, .{ .storage_class = .Function });
- const ptr_elem_ty_ref = try self.ptrType(ty.elemType2(mod), .Function);
+ const ptr_elem_ty_id = try self.ptrType(ty.elemType2(mod), .Function);
for (constituents, 0..) |constitent_id, index| {
- const ptr_id = try self.accessChain(ptr_elem_ty_ref, ptr_composite_id, &.{@as(u32, @intCast(index))});
+ const ptr_id = try self.accessChain(ptr_elem_ty_id, ptr_composite_id, &.{@as(u32, @intCast(index))});
try self.func.body.emit(self.spv.gpa, .OpStore, .{
.pointer = ptr_id,
.object = constitent_id,
@@ -876,8 +860,7 @@ const DeclGen = struct {
const mod = self.module;
const target = self.getTarget();
- const result_ty_ref = try self.resolveType(ty, repr);
- const result_ty_id = self.typeId(result_ty_ref);
+ const result_ty_id = try self.resolveType(ty, repr);
const ip = &mod.intern_pool;
log.debug("lowering constant: ty = {}, val = {}", .{ ty.fmt(mod), val.fmtValue(mod) });
@@ -1033,7 +1016,7 @@ const DeclGen = struct {
const payload_id = if (maybe_payload_val) |payload_val|
try self.constant(payload_ty, payload_val, .indirect)
else
- try self.spv.constUndef(try self.resolveType2(payload_ty, .indirect));
+ try self.spv.constUndef(try self.resolveType(payload_ty, .indirect));
return try self.constructStruct(
ty,
@@ -1134,8 +1117,9 @@ const DeclGen = struct {
}
fn constantPtr(self: *DeclGen, ptr_ty: Type, ptr_val: Value) Error!IdRef {
- const result_ty_id = try self.resolveType2(ptr_ty, .direct);
- const result_ty_ref = try self.resolveType(ptr_ty, .direct);
+ // TODO: Caching??
+
+ const result_ty_id = try self.resolveType(ptr_ty, .direct);
const mod = self.module;
if (ptr_val.isUndef(mod)) return self.spv.constUndef(result_ty_id);
@@ -1149,7 +1133,7 @@ const DeclGen = struct {
// that is not implemented by Mesa yet. Therefore, just generate it
// as a runtime operation.
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = ptr_id,
.integer_value = try self.constant(Type.usize, Value.fromInterned(int), .direct),
});
@@ -1167,16 +1151,17 @@ const DeclGen = struct {
// TODO: Can we consolidate this in ptrElemPtr?
const elem_ty = parent_ptr_ty.elemType2(mod); // use elemType() so that we get T for *[N]T.
- const elem_ptr_ty_ref = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(parent_ptr_ty.ptrAddressSpace(mod)));
- if (elem_ptr_ty_ref == result_ty_ref) {
+ // TODO: Can we remove this ID comparison?
+ if (elem_ptr_ty_id == result_ty_id) {
return elem_ptr_id;
}
// This may happen when we have pointer-to-array and the result is
// another pointer-to-array instead of a pointer-to-element.
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.operand = elem_ptr_id,
});
@@ -1200,7 +1185,7 @@ const DeclGen = struct {
const mod = self.module;
const ip = &mod.intern_pool;
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
const decl_val = anon_decl.val;
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
@@ -1215,7 +1200,7 @@ const DeclGen = struct {
// const is_fn_body = decl_ty.zigTypeTag(mod) == .Fn;
if (!decl_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod)) {
// Pointer to nothing - return undefined
- return self.spv.constUndef(self.typeId(ty_ref));
+ return self.spv.constUndef(ty_id);
}
if (decl_ty.zigTypeTag(mod) == .Fn) {
@@ -1224,14 +1209,14 @@ const DeclGen = struct {
// Anon decl refs are always generic.
assert(ty.ptrAddressSpace(mod) == .generic);
- const decl_ptr_ty_ref = try self.ptrType(decl_ty, .Generic);
+ const decl_ptr_ty_id = try self.ptrType(decl_ty, .Generic);
const ptr_id = try self.resolveAnonDecl(decl_val);
- if (decl_ptr_ty_ref != ty_ref) {
+ if (decl_ptr_ty_id != ty_id) {
// Differing pointer types, insert a cast.
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = ty_id,
.id_result = casted_ptr_id,
.operand = ptr_id,
});
@@ -1243,8 +1228,7 @@ const DeclGen = struct {
fn constantDeclRef(self: *DeclGen, ty: Type, decl_index: InternPool.DeclIndex) !IdRef {
const mod = self.module;
- const ty_ref = try self.resolveType(ty, .direct);
- const ty_id = self.typeId(ty_ref);
+ const ty_id = try self.resolveType(ty, .direct);
const decl = mod.declPtr(decl_index);
switch (mod.intern_pool.indexToKey(decl.val.ip_index)) {
@@ -1273,14 +1257,14 @@ const DeclGen = struct {
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
try self.addFunctionDep(spv_decl_index, final_storage_class);
- const decl_ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
+ const decl_ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
const ptr_id = switch (final_storage_class) {
- .Generic => try self.castToGeneric(self.typeId(decl_ptr_ty_ref), decl_id),
+ .Generic => try self.castToGeneric(decl_ptr_ty_id, decl_id),
else => decl_id,
};
- if (decl_ptr_ty_ref != ty_ref) {
+ if (decl_ptr_ty_id != ty_id) {
// Differing pointer types, insert a cast.
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
@@ -1295,36 +1279,18 @@ const DeclGen = struct {
}
// Turn a Zig type's name into a string.
- fn resolveTypeName(self: *DeclGen, ty: Type) !CacheString {
+ fn resolveTypeName(self: *DeclGen, ty: Type) ![]const u8 {
var name = std.ArrayList(u8).init(self.gpa);
defer name.deinit();
try ty.print(name.writer(), self.module);
- return try self.spv.resolveString(name.items);
- }
-
- /// Turn a Zig type into a SPIR-V Type, and return its type result-id.
- fn resolveTypeId(self: *DeclGen, ty: Type) !IdResultType {
- const type_ref = try self.resolveType(ty, .direct);
- return self.spv.resultId(type_ref);
- }
-
- /// Turn a Zig type into a SPIR-V Type result-id.
- /// This function represents the "new interface", where types handled only
- /// with Type and IdResult, and CacheRef is not used. Prefer this for now.
- fn resolveType2(self: *DeclGen, ty: Type, repr: Repr) !IdResult {
- const type_ref = try self.resolveType(ty, repr);
- return self.typeId(type_ref);
- }
-
- fn typeId(self: *DeclGen, ty_ref: CacheRef) IdRef {
- return self.spv.resultId(ty_ref);
+ return try name.toOwnedSlice();
}
/// Create an integer type suitable for storing at least 'bits' bits.
/// The integer type returned by this function is the one used to perform
/// actual operations on (as well as to store) a Zig type of a particular number of bits. To create
/// a type with an exact size, use SpvModule.intType.
- fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !CacheRef {
+ fn intType(self: *DeclGen, signedness: std.builtin.Signedness, bits: u16) !IdRef {
const backing_bits = self.backingIntBits(bits) orelse {
// TODO: Integers too big for any native type are represented as "composite integers":
// An array of largestSupportedIntBits.
@@ -1339,31 +1305,69 @@ const DeclGen = struct {
return self.spv.intType(.unsigned, backing_bits);
}
- fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !CacheRef {
+ fn arrayType(self: *DeclGen, len: u32, child_ty: IdRef) !IdRef {
+ // TODO: Cache??
+ const len_id = try self.constInt(Type.u32, len, .direct);
+ const result_id = self.spv.allocId();
+
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeArray, .{
+ .id_result = result_id,
+ .element_type = child_ty,
+ .length = len_id,
+ });
+ return result_id;
+ }
+
+ fn ptrType(self: *DeclGen, child_ty: Type, storage_class: StorageClass) !IdRef {
const key = .{ child_ty.toIntern(), storage_class };
- const entry = try self.wip_pointers.getOrPut(self.gpa, key);
+ const entry = try self.ptr_types.getOrPut(self.gpa, key);
if (entry.found_existing) {
- const fwd_ref = entry.value_ptr.*;
- try self.spv.cache.recursive_ptrs.put(self.spv.gpa, fwd_ref, {});
- return fwd_ref;
+ const fwd_id = entry.value_ptr.ty_id;
+ if (!entry.value_ptr.fwd_emitted) {
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeForwardPointer, .{
+ .pointer_type = fwd_id,
+ .storage_class = storage_class,
+ });
+ entry.value_ptr.fwd_emitted = true;
+ }
+ return fwd_id;
}
- const fwd_ref = try self.spv.resolve(.{ .fwd_ptr_type = .{
- .zig_child_type = child_ty.toIntern(),
- .storage_class = storage_class,
- } });
- entry.value_ptr.* = fwd_ref;
+ const result_id = self.spv.allocId();
+ entry.value_ptr.* = .{
+ .ty_id = result_id,
+ .fwd_emitted = false,
+ };
- const child_ty_ref = try self.resolveType(child_ty, .indirect);
- _ = try self.spv.resolve(.{ .ptr_type = .{
+ const child_ty_id = try self.resolveType(child_ty, .indirect);
+
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypePointer, .{
+ .id_result = result_id,
.storage_class = storage_class,
- .child_type = child_ty_ref,
- .fwd = fwd_ref,
- } });
+ .type = child_ty_id,
+ });
+
+ return result_id;
+ }
+
+ fn functionType(self: *DeclGen, return_ty: Type, param_types: []const Type) !IdRef {
+ // TODO: Cache??
- assert(self.wip_pointers.remove(key));
+ const param_ids = try self.gpa.alloc(IdRef, param_types.len);
+ defer self.gpa.free(param_ids);
+
+ for (param_types, param_ids) |param_ty, *param_id| {
+ param_id.* = try self.resolveType(param_ty, .direct);
+ }
- return fwd_ref;
+ const ty_id = self.spv.allocId();
+ try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpTypeFunction, .{
+ .id_result = ty_id,
+ .return_type = try self.resolveFnReturnType(return_ty),
+ .id_ref_2 = param_ids,
+ });
+
+ return ty_id;
}
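The two-phase scheme in ptrType above, modeled standalone: reserve the result-id first, resolve the child type (which may re-enter for a recursive pointer), and emit OpTypeForwardPointer only on re-entry. A toy runnable sketch with u32 ids and a string log standing in for the types section (all names illustrative, not part of this commit):

const std = @import("std");

const PtrEntry = struct { ty_id: u32, fwd_emitted: bool };

const Emitter = struct {
    gpa: std.mem.Allocator,
    next_id: u32 = 1,
    log: std.ArrayListUnmanaged([]const u8) = .{},
    ptr_types: std.AutoHashMapUnmanaged(u32, PtrEntry) = .{},

    // `recursive` simulates a child type that refers back to this pointer.
    fn ptrType(self: *Emitter, child: u32, recursive: bool) !u32 {
        const entry = try self.ptr_types.getOrPut(self.gpa, child);
        if (entry.found_existing) {
            // Re-entered while resolving the child: forward-declare exactly once.
            if (!entry.value_ptr.fwd_emitted) {
                try self.log.append(self.gpa, "OpTypeForwardPointer");
                entry.value_ptr.fwd_emitted = true;
            }
            return entry.value_ptr.ty_id;
        }
        const id = self.next_id;
        self.next_id += 1;
        entry.value_ptr.* = .{ .ty_id = id, .fwd_emitted = false };
        // Resolving the child type happens here and may call ptrType again.
        if (recursive) _ = try self.ptrType(child, false);
        try self.log.append(self.gpa, "OpTypePointer");
        return id;
    }
};

test "recursive pointers get exactly one forward declaration" {
    const gpa = std.testing.allocator;
    var e = Emitter{ .gpa = gpa };
    defer e.log.deinit(gpa);
    defer e.ptr_types.deinit(gpa);

    const id = try e.ptrType(0, true);
    try std.testing.expectEqual(id, try e.ptrType(0, false));
    try std.testing.expectEqual(@as(usize, 2), e.log.items.len);
    try std.testing.expectEqualStrings("OpTypeForwardPointer", e.log.items[0]);
    try std.testing.expectEqualStrings("OpTypePointer", e.log.items[1]);
}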
/// Generate a union type. Union types are always generated with the
@@ -1384,7 +1388,7 @@ const DeclGen = struct {
/// padding: [padding_size]u8,
/// }
/// If any of the fields' size is 0, it will be omitted.
- fn resolveUnionType(self: *DeclGen, ty: Type) !CacheRef {
+ fn resolveUnionType(self: *DeclGen, ty: Type) !IdRef {
const mod = self.module;
const ip = &mod.intern_pool;
const union_obj = mod.typeToUnion(ty).?;
@@ -1399,48 +1403,43 @@ const DeclGen = struct {
return try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
}
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
- var member_types: [4]CacheRef = undefined;
- var member_names: [4]CacheString = undefined;
+ var member_types: [4]IdRef = undefined;
+ var member_names: [4][]const u8 = undefined;
- const u8_ty_ref = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled?
+ const u8_ty_id = try self.resolveType(Type.u8, .direct); // TODO: What if Int8Type is not enabled?
if (layout.tag_size != 0) {
- const tag_ty_ref = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
- member_types[layout.tag_index] = tag_ty_ref;
- member_names[layout.tag_index] = try self.spv.resolveString("(tag)");
+ const tag_ty_id = try self.resolveType(Type.fromInterned(union_obj.enum_tag_ty), .indirect);
+ member_types[layout.tag_index] = tag_ty_id;
+ member_names[layout.tag_index] = "(tag)";
}
if (layout.payload_size != 0) {
- const payload_ty_ref = try self.resolveType(layout.payload_ty, .indirect);
- member_types[layout.payload_index] = payload_ty_ref;
- member_names[layout.payload_index] = try self.spv.resolveString("(payload)");
+ const payload_ty_id = try self.resolveType(layout.payload_ty, .indirect);
+ member_types[layout.payload_index] = payload_ty_id;
+ member_names[layout.payload_index] = "(payload)";
}
if (layout.payload_padding_size != 0) {
- const payload_padding_ty_ref = try self.spv.arrayType(@intCast(layout.payload_padding_size), u8_ty_ref);
- member_types[layout.payload_padding_index] = payload_padding_ty_ref;
- member_names[layout.payload_padding_index] = try self.spv.resolveString("(payload padding)");
+ const payload_padding_ty_id = try self.arrayType(@intCast(layout.payload_padding_size), u8_ty_id);
+ member_types[layout.payload_padding_index] = payload_padding_ty_id;
+ member_names[layout.payload_padding_index] = "(payload padding)";
}
if (layout.padding_size != 0) {
- const padding_ty_ref = try self.spv.arrayType(@intCast(layout.padding_size), u8_ty_ref);
- member_types[layout.padding_index] = padding_ty_ref;
- member_names[layout.padding_index] = try self.spv.resolveString("(padding)");
+ const padding_ty_id = try self.arrayType(@intCast(layout.padding_size), u8_ty_id);
+ member_types[layout.padding_index] = padding_ty_id;
+ member_names[layout.padding_index] = "(padding)";
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = member_types[0..layout.total_fields],
- .member_names = member_names[0..layout.total_fields],
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ const result_id = try self.spv.structType(member_types[0..layout.total_fields], member_names[0..layout.total_fields]);
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+ try self.spv.debugName(result_id, type_name);
+ return result_id;
}
- fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !CacheRef {
+ fn resolveFnReturnType(self: *DeclGen, ret_ty: Type) !IdRef {
const mod = self.module;
if (!ret_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// If the return type is an error set or an error union, then we make this
@@ -1457,25 +1456,45 @@ const DeclGen = struct {
}
/// Turn a Zig type into a SPIR-V Type, and return a reference to it.
- fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!CacheRef {
+ fn resolveType(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
+ if (self.intern_map.get(.{ ty.toIntern(), repr })) |id| {
+ return id;
+ }
+
+ const id = try self.resolveTypeInner(ty, repr);
+ try self.intern_map.put(self.gpa, .{ ty.toIntern(), repr }, id);
+ return id;
+ }
+
+ fn resolveTypeInner(self: *DeclGen, ty: Type, repr: Repr) Error!IdRef {
const mod = self.module;
const ip = &mod.intern_pool;
log.debug("resolveType: ty = {}", .{ty.fmt(mod)});
const target = self.getTarget();
+
+ const section = &self.spv.sections.types_globals_constants;
+
switch (ty.zigTypeTag(mod)) {
.NoReturn => {
assert(repr == .direct);
- return try self.spv.resolve(.void_type);
+ return try self.spv.voidType();
},
.Void => switch (repr) {
- .direct => return try self.spv.resolve(.void_type),
+ .direct => {
+ return try self.spv.voidType();
+ },
// Pointers to void
- .indirect => return try self.spv.resolve(.{ .opaque_type = .{
- .name = try self.spv.resolveString("void"),
- } }),
+ .indirect => {
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = "void",
+ });
+ return result_id;
+ },
},
.Bool => switch (repr) {
- .direct => return try self.spv.resolve(.bool_type),
+ .direct => return try self.spv.boolType(),
.indirect => return try self.resolveType(Type.u1, .indirect),
},
.Int => {
@@ -1484,15 +1503,18 @@ const DeclGen = struct {
// Some times, the backend will be asked to generate a pointer to i0. OpTypeInt
// with 0 bits is invalid, so return an opaque type in this case.
assert(repr == .indirect);
- return try self.spv.resolve(.{ .opaque_type = .{
- .name = try self.spv.resolveString("u0"),
- } });
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = "u0",
+ });
+ return result_id;
}
return try self.intType(int_info.signedness, int_info.bits);
},
.Enum => {
const tag_ty = ty.intTagType(mod);
- return self.resolveType(tag_ty, repr);
+ return try self.resolveType(tag_ty, repr);
},
.Float => {
// We can't (and don't want to) emulate floating-point types with other floating-point types like we do with the integer types,
@@ -1510,27 +1532,29 @@ const DeclGen = struct {
return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits});
}
- return try self.spv.resolve(.{ .float_type = .{ .bits = bits } });
+ return try self.spv.floatType(bits);
},
.Array => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const elem_ty = ty.childType(mod);
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
+ const elem_ty_id = try self.resolveType(elem_ty, .indirect);
const total_len = std.math.cast(u32, ty.arrayLenIncludingSentinel(mod)) orelse {
return self.fail("array type of {} elements is too large", .{ty.arrayLenIncludingSentinel(mod)});
};
- const ty_ref = if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) blk: {
+
+ if (!elem_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// The size of the array would be 0, but that is not allowed in SPIR-V.
// This path can be reached when the backend is asked to generate a pointer to
// an array of some zero-bit type. This should always be an indirect path.
assert(repr == .indirect);
// We cannot use the child type here, so just use an opaque type.
- break :blk try self.spv.resolve(.{ .opaque_type = .{
- .name = try self.spv.resolveString("zero-sized array"),
- } });
- } else if (total_len == 0) blk: {
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = "zero-sized array",
+ });
+ return result_id;
+ } else if (total_len == 0) {
// The size of the array would be 0, but that is not allowed in SPIR-V.
// This path can be reached for example when there is a slicing of a pointer
// that produces a zero-length array. In all cases where this type can be generated,
@@ -1540,16 +1564,13 @@ const DeclGen = struct {
// In this case, we have an array of a non-zero sized type. In this case,
// generate an array of 1 element instead, so that ptr_elem_ptr instructions
// can be lowered to ptrAccessChain instead of manually performing the math.
- break :blk try self.spv.arrayType(1, elem_ty_ref);
- } else try self.spv.arrayType(total_len, elem_ty_ref);
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return try self.arrayType(1, elem_ty_id);
+ } else {
+ return try self.arrayType(total_len, elem_ty_id);
+ }
},
.Fn => switch (repr) {
.direct => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const fn_info = mod.typeToFunc(ty).?;
comptime assert(zig_call_abi_ver == 3);
@@ -1562,25 +1583,28 @@ const DeclGen = struct {
if (fn_info.is_var_args)
return self.fail("VarArgs functions are unsupported for SPIR-V", .{});
- const param_ty_refs = try self.gpa.alloc(CacheRef, fn_info.param_types.len);
- defer self.gpa.free(param_ty_refs);
+ // Note: Logic is different from functionType().
+ const param_ty_ids = try self.gpa.alloc(IdRef, fn_info.param_types.len);
+ defer self.gpa.free(param_ty_ids);
var param_index: usize = 0;
for (fn_info.param_types.get(ip)) |param_ty_index| {
const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- param_ty_refs[param_index] = try self.resolveType(param_ty, .direct);
+ param_ty_ids[param_index] = try self.resolveType(param_ty, .direct);
param_index += 1;
}
- const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
- const ty_ref = try self.spv.resolve(.{ .function_type = .{
- .return_type = return_ty_ref,
- .parameters = param_ty_refs[0..param_index],
- } });
+ const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
+
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeFunction, .{
+ .id_result = result_id,
+ .return_type = return_ty_id,
+ .id_ref_2 = param_ty_ids[0..param_index],
+ });
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return result_id;
},
.indirect => {
// TODO: Represent function pointers properly.
@@ -1591,46 +1615,35 @@ const DeclGen = struct {
.Pointer => {
const ptr_info = ty.ptrInfo(mod);
- // Note: Don't cache this pointer type, it would mess up the recursive pointer functionality
- // in ptrType()!
-
const storage_class = self.spvStorageClass(ptr_info.flags.address_space);
- const ptr_ty_ref = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
+ const ptr_ty_id = try self.ptrType(Type.fromInterned(ptr_info.child), storage_class);
if (ptr_info.flags.size != .Slice) {
- return ptr_ty_ref;
+ return ptr_ty_id;
}
- const size_ty_ref = try self.resolveType(Type.usize, .direct);
- return self.spv.resolve(.{ .struct_type = .{
- .member_types = &.{ ptr_ty_ref, size_ty_ref },
- .member_names = &.{
- try self.spv.resolveString("ptr"),
- try self.spv.resolveString("len"),
- },
- } });
+ const size_ty_id = try self.resolveType(Type.usize, .direct);
+ return self.spv.structType(
+ &.{ ptr_ty_id, size_ty_id },
+ &.{ "ptr", "len" },
+ );
},
.Vector => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const elem_ty = ty.childType(mod);
- const elem_ty_ref = try self.resolveType(elem_ty, .indirect);
+ // TODO: Make `.direct`.
+ const elem_ty_id = try self.resolveType(elem_ty, .indirect);
const len = ty.vectorLen(mod);
- const ty_ref = if (self.isVector(ty))
- try self.spv.vectorType(len, elem_ty_ref)
- else
- try self.spv.arrayType(len, elem_ty_ref);
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ if (self.isVector(ty)) {
+ return try self.spv.vectorType(len, elem_ty_id);
+ } else {
+ return try self.arrayType(len, elem_ty_id);
+ }
},
.Struct => {
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
const struct_type = switch (ip.indexToKey(ty.toIntern())) {
.anon_struct_type => |tuple| {
- const member_types = try self.gpa.alloc(CacheRef, tuple.values.len);
+ const member_types = try self.gpa.alloc(IdRef, tuple.values.len);
defer self.gpa.free(member_types);
var member_index: usize = 0;
@@ -1641,13 +1654,11 @@ const DeclGen = struct {
member_index += 1;
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = member_types[0..member_index],
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ const result_id = try self.spv.structType(member_types[0..member_index], null);
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+ try self.spv.debugName(result_id, type_name);
+ return result_id;
},
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
@@ -1657,10 +1668,10 @@ const DeclGen = struct {
return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
}
- var member_types = std.ArrayList(CacheRef).init(self.gpa);
+ var member_types = std.ArrayList(IdRef).init(self.gpa);
defer member_types.deinit();
- var member_names = std.ArrayList(CacheString).init(self.gpa);
+ var member_names = std.ArrayList([]const u8).init(self.gpa);
defer member_names.deinit();
var it = struct_type.iterateRuntimeOrder(ip);
@@ -1674,17 +1685,14 @@ const DeclGen = struct {
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index});
try member_types.append(try self.resolveType(field_ty, .indirect));
- try member_names.append(try self.spv.resolveString(ip.stringToSlice(field_name)));
+ try member_names.append(ip.stringToSlice(field_name));
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = member_types.items,
- .member_names = member_names.items,
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ const result_id = try self.spv.structType(member_types.items, member_names.items);
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+ try self.spv.debugName(result_id, type_name);
+ return result_id;
},
.Optional => {
const payload_ty = ty.optionalChild(mod);
@@ -1695,77 +1703,58 @@ const DeclGen = struct {
return try self.resolveType(Type.bool, .indirect);
}
- const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
+ const payload_ty_id = try self.resolveType(payload_ty, .indirect);
if (ty.optionalReprIsPayload(mod)) {
// Optional is actually a pointer or a slice.
- return payload_ty_ref;
+ return payload_ty_id;
}
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
- const bool_ty_ref = try self.resolveType(Type.bool, .indirect);
-
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .member_types = &.{ payload_ty_ref, bool_ty_ref },
- .member_names = &.{
- try self.spv.resolveString("payload"),
- try self.spv.resolveString("valid"),
- },
- } });
+ const bool_ty_id = try self.resolveType(Type.bool, .indirect);
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return try self.spv.structType(
+ &.{ payload_ty_id, bool_ty_id },
+ &.{ "payload", "valid" },
+ );
},
.Union => return try self.resolveUnionType(ty),
.ErrorSet => return try self.resolveType(Type.u16, repr),
.ErrorUnion => {
const payload_ty = ty.errorUnionPayload(mod);
- const error_ty_ref = try self.resolveType(Type.anyerror, .indirect);
+ const error_ty_id = try self.resolveType(Type.anyerror, .indirect);
const eu_layout = self.errorUnionLayout(payload_ty);
if (!eu_layout.payload_has_bits) {
- return error_ty_ref;
+ return error_ty_id;
}
- if (self.type_map.get(ty.toIntern())) |info| return info.ty_ref;
-
- const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
+ const payload_ty_id = try self.resolveType(payload_ty, .indirect);
- var member_types: [2]CacheRef = undefined;
- var member_names: [2]CacheString = undefined;
+ var member_types: [2]IdRef = undefined;
+ var member_names: [2][]const u8 = undefined;
if (eu_layout.error_first) {
// Put the error first
- member_types = .{ error_ty_ref, payload_ty_ref };
- member_names = .{
- try self.spv.resolveString("error"),
- try self.spv.resolveString("payload"),
- };
+ member_types = .{ error_ty_id, payload_ty_id };
+ member_names = .{ "error", "payload" };
// TODO: ABI padding?
} else {
// Put the payload first.
- member_types = .{ payload_ty_ref, error_ty_ref };
- member_names = .{
- try self.spv.resolveString("payload"),
- try self.spv.resolveString("error"),
- };
+ member_types = .{ payload_ty_id, error_ty_id };
+ member_names = .{ "payload", "error" };
// TODO: ABI padding?
}
- const ty_ref = try self.spv.resolve(.{ .struct_type = .{
- .name = try self.resolveTypeName(ty),
- .member_types = &member_types,
- .member_names = &member_names,
- } });
-
- try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
- return ty_ref;
+ return try self.spv.structType(&member_types, &member_names);
},
.Opaque => {
- return try self.spv.resolve(.{
- .opaque_type = .{
- .name = .none, // TODO
- },
+ const type_name = try self.resolveTypeName(ty);
+ defer self.gpa.free(type_name);
+
+ const result_id = self.spv.allocId();
+ try section.emit(self.spv.gpa, .OpTypeOpaque, .{
+ .id_result = result_id,
+ .literal_string = type_name,
});
+ return result_id;
},
.Null,
@@ -1773,9 +1762,10 @@ const DeclGen = struct {
.EnumLiteral,
.ComptimeFloat,
.ComptimeInt,
+ .Type,
=> unreachable, // Must be comptime.
- else => |tag| return self.todo("Implement zig type '{}'", .{tag}),
+ .Frame, .AnyFrame => unreachable, // TODO
}
}
@@ -1924,7 +1914,6 @@ const DeclGen = struct {
result_ty: Type,
ty: Type,
/// Always in direct representation.
- ty_ref: CacheRef,
ty_id: IdRef,
/// True if the input is an array type.
is_array: bool,
@@ -1984,14 +1973,13 @@ const DeclGen = struct {
@memset(results, undefined);
const ty = if (is_array) result_ty.scalarType(mod) else result_ty;
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
return .{
.dg = self,
.result_ty = result_ty,
.ty = ty,
- .ty_ref = ty_ref,
- .ty_id = self.typeId(ty_ref),
+ .ty_id = ty_id,
.is_array = is_array,
.results = results,
};
@@ -2018,16 +2006,13 @@ const DeclGen = struct {
/// TODO is to also write out the error as a function call parameter, and to somehow fetch
/// the name of an error in the text executor.
fn generateTestEntryPoint(self: *DeclGen, name: []const u8, spv_test_decl_index: SpvModule.Decl.Index) !void {
- const anyerror_ty_ref = try self.resolveType(Type.anyerror, .direct);
- const ptr_anyerror_ty_ref = try self.ptrType(Type.anyerror, .CrossWorkgroup);
- const void_ty_ref = try self.resolveType(Type.void, .direct);
-
- const kernel_proto_ty_ref = try self.spv.resolve(.{
- .function_type = .{
- .return_type = void_ty_ref,
- .parameters = &.{ptr_anyerror_ty_ref},
- },
+ const anyerror_ty_id = try self.resolveType(Type.anyerror, .direct);
+ const ptr_anyerror_ty = try self.module.ptrType(.{
+ .child = Type.anyerror.toIntern(),
+ .flags = .{ .address_space = .global },
});
+ const ptr_anyerror_ty_id = try self.resolveType(ptr_anyerror_ty, .direct);
+ const kernel_proto_ty_id = try self.functionType(Type.void, &.{ptr_anyerror_ty});
const test_id = self.spv.declPtr(spv_test_decl_index).result_id;
@@ -2039,20 +2024,20 @@ const DeclGen = struct {
const section = &self.spv.sections.functions;
try section.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(void_ty_ref),
+ .id_result_type = try self.resolveType(Type.void, .direct),
.id_result = kernel_id,
.function_control = .{},
- .function_type = self.typeId(kernel_proto_ty_ref),
+ .function_type = kernel_proto_ty_id,
});
try section.emit(self.spv.gpa, .OpFunctionParameter, .{
- .id_result_type = self.typeId(ptr_anyerror_ty_ref),
+ .id_result_type = ptr_anyerror_ty_id,
.id_result = p_error_id,
});
try section.emit(self.spv.gpa, .OpLabel, .{
.id_result = self.spv.allocId(),
});
try section.emit(self.spv.gpa, .OpFunctionCall, .{
- .id_result_type = self.typeId(anyerror_ty_ref),
+ .id_result_type = anyerror_ty_id,
.id_result = error_id,
.function = test_id,
});
@@ -2084,17 +2069,17 @@ const DeclGen = struct {
.func => {
assert(decl.typeOf(mod).zigTypeTag(mod) == .Fn);
const fn_info = mod.typeToFunc(decl.typeOf(mod)).?;
- const return_ty_ref = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
+ const return_ty_id = try self.resolveFnReturnType(Type.fromInterned(fn_info.return_type));
- const prototype_ty_ref = try self.resolveType(decl.typeOf(mod), .direct);
+ const prototype_ty_id = try self.resolveType(decl.typeOf(mod), .direct);
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(return_ty_ref),
+ .id_result_type = return_ty_id,
.id_result = result_id,
.function_control = switch (fn_info.cc) {
.Inline => .{ .Inline = true },
else => .{},
},
- .function_type = self.typeId(prototype_ty_ref),
+ .function_type = prototype_ty_id,
});
comptime assert(zig_call_abi_ver == 3);
@@ -2103,7 +2088,7 @@ const DeclGen = struct {
const param_ty = Type.fromInterned(param_ty_index);
if (!param_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
- const param_type_id = try self.resolveTypeId(param_ty);
+ const param_type_id = try self.resolveType(param_ty, .direct);
const arg_result_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunctionParameter, .{
.id_result_type = param_type_id,
@@ -2159,10 +2144,10 @@ const DeclGen = struct {
const final_storage_class = self.spvStorageClass(decl.@"addrspace");
assert(final_storage_class != .Generic); // These should be instance globals
- const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), final_storage_class);
+ const ptr_ty_id = try self.ptrType(decl.typeOf(mod), final_storage_class);
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpVariable, .{
- .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result_type = ptr_ty_id,
.id_result = result_id,
.storage_class = final_storage_class,
});
@@ -2182,22 +2167,18 @@ const DeclGen = struct {
try self.spv.declareDeclDeps(spv_decl_index, &.{});
- const ptr_ty_ref = try self.ptrType(decl.typeOf(mod), .Function);
+ const ptr_ty_id = try self.ptrType(decl.typeOf(mod), .Function);
if (maybe_init_val) |init_val| {
// TODO: Combine with resolveAnonDecl?
- const void_ty_ref = try self.resolveType(Type.void, .direct);
- const initializer_proto_ty_ref = try self.spv.resolve(.{ .function_type = .{
- .return_type = void_ty_ref,
- .parameters = &.{},
- } });
+ const initializer_proto_ty_id = try self.functionType(Type.void, &.{});
const initializer_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpFunction, .{
- .id_result_type = self.typeId(void_ty_ref),
+ .id_result_type = try self.resolveType(Type.void, .direct),
.id_result = initializer_id,
.function_control = .{},
- .function_type = self.typeId(initializer_proto_ty_ref),
+ .function_type = initializer_proto_ty_id,
});
const root_block_id = self.spv.allocId();
@@ -2220,7 +2201,7 @@ const DeclGen = struct {
try self.spv.debugNameFmt(initializer_id, "initializer of {s}", .{fqn});
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result_type = ptr_ty_id,
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -2228,7 +2209,7 @@ const DeclGen = struct {
});
} else {
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(ptr_ty_ref),
+ .id_result_type = ptr_ty_id,
.id_result = result_id,
.set = try self.spv.importInstructionSet(.zig),
.instruction = .{ .inst = 0 }, // TODO: Put this definition somewhere...
@@ -2244,7 +2225,7 @@ const DeclGen = struct {
const one_id = try self.constInt(ty, 1, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpSelect, .{
- .id_result_type = try self.resolveType2(ty, .direct),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = result_id,
.condition = condition_id,
.object_1 = one_id,
@@ -2261,7 +2242,7 @@ const DeclGen = struct {
.Bool => blk: {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
- .id_result_type = try self.resolveType2(Type.bool, .direct),
+ .id_result_type = try self.resolveType(Type.bool, .direct),
.id_result = result_id,
.operand_1 = operand_id,
.operand_2 = try self.constBool(false, .indirect),
@@ -2283,11 +2264,11 @@ const DeclGen = struct {
}
fn extractField(self: *DeclGen, result_ty: Type, object: IdRef, field: u32) !IdRef {
- const result_ty_ref = try self.resolveType(result_ty, .indirect);
+ const result_ty_id = try self.resolveType(result_ty, .indirect);
const result_id = self.spv.allocId();
const indexes = [_]u32{field};
try self.func.body.emit(self.spv.gpa, .OpCompositeExtract, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.composite = object,
.indexes = &indexes,
@@ -2301,13 +2282,13 @@ const DeclGen = struct {
};
fn load(self: *DeclGen, value_ty: Type, ptr_id: IdRef, options: MemoryOptions) !IdRef {
- const indirect_value_ty_ref = try self.resolveType(value_ty, .indirect);
+ const indirect_value_ty_id = try self.resolveType(value_ty, .indirect);
const result_id = self.spv.allocId();
const access = spec.MemoryAccess.Extended{
.Volatile = options.is_volatile,
};
try self.func.body.emit(self.spv.gpa, .OpLoad, .{
- .id_result_type = self.typeId(indirect_value_ty_ref),
+ .id_result_type = indirect_value_ty_id,
.id_result = result_id,
.pointer = ptr_id,
.memory_access = access,
@@ -2519,7 +2500,8 @@ const DeclGen = struct {
const result_ty = self.typeOfIndex(inst);
const shift_ty = self.typeOf(bin_op.rhs);
- const shift_ty_ref = try self.resolveType(shift_ty, .direct);
+ const scalar_result_ty_id = try self.resolveType(result_ty.scalarType(mod), .direct);
+ const scalar_shift_ty_id = try self.resolveType(shift_ty.scalarType(mod), .direct);
const info = self.arithmeticTypeInfo(result_ty);
switch (info.class) {
@@ -2536,7 +2518,7 @@ const DeclGen = struct {
// Sometimes Zig doesn't make both of the arguments the same types here. SPIR-V expects that,
// so just manually upcast it if required.
- const shift_id = if (shift_ty_ref != wip.ty_ref) blk: {
+ const shift_id = if (scalar_shift_ty_id != scalar_result_ty_id) blk: {
const shift_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
.id_result_type = wip.ty_id,
@@ -2663,7 +2645,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
const mask_id = try self.constInt(ty, mask_value, .direct);
try self.func.body.emit(self.spv.gpa, .OpBitwiseAnd, .{
- .id_result_type = try self.resolveType2(ty, .direct),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = result_id,
.operand_1 = value_id,
.operand_2 = mask_id,
@@ -2675,14 +2657,14 @@ const DeclGen = struct {
const shift_amt_id = try self.constInt(ty, info.backing_bits - info.bits, .direct);
const left_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpShiftLeftLogical, .{
- .id_result_type = try self.resolveType2(ty, .direct),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = left_id,
.base = value_id,
.shift = shift_amt_id,
});
const right_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpShiftRightArithmetic, .{
- .id_result_type = try self.resolveType2(ty, .direct),
+ .id_result_type = try self.resolveType(ty, .direct),
.id_result = right_id,
.base = left_id,
.shift = shift_amt_id,
@@ -2698,7 +2680,7 @@ const DeclGen = struct {
const lhs_id = try self.resolve(bin_op.lhs);
const rhs_id = try self.resolve(bin_op.rhs);
const ty = self.typeOfIndex(inst);
- const ty_id = try self.resolveType2(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
const info = self.arithmeticTypeInfo(ty);
switch (info.class) {
.composite_integer => unreachable, // TODO
@@ -2759,7 +2741,7 @@ const DeclGen = struct {
fn floor(self: *DeclGen, ty: Type, operand_id: IdRef) !IdRef {
const target = self.getTarget();
- const ty_ref = try self.resolveType(ty, .direct);
+ const ty_id = try self.resolveType(ty, .direct);
const ext_inst: Word = switch (target.os.tag) {
.opencl => 25,
.vulkan => 8,
@@ -2773,7 +2755,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpExtInst, .{
- .id_result_type = self.typeId(ty_ref),
+ .id_result_type = ty_id,
.id_result = result_id,
.set = set_id,
.instruction = .{ .inst = ext_inst },
@@ -2928,11 +2910,12 @@ const DeclGen = struct {
const operand_ty = self.typeOf(extra.lhs);
const ov_ty = result_ty.structFieldType(1, self.module);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- const cmp_ty_ref = if (self.isVector(operand_ty))
- try self.spv.vectorType(operand_ty.vectorLen(mod), bool_ty_ref)
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
+ const cmp_ty_id = if (self.isVector(operand_ty))
+ // TODO: Resolving a vector type with .direct should return a SPIR-V vector
+ try self.spv.vectorType(operand_ty.vectorLen(mod), try self.resolveType(Type.bool, .direct))
else
- bool_ty_ref;
+ bool_ty_id;
const info = self.arithmeticTypeInfo(operand_ty);
switch (info.class) {
@@ -2968,7 +2951,7 @@ const DeclGen = struct {
// For subtraction the conditions need to be swapped.
const overflowed_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, ucmp, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = overflowed_id,
.operand_1 = result_id.*,
.operand_2 = lhs_elem_id,
@@ -2996,7 +2979,7 @@ const DeclGen = struct {
const rhs_lt_zero_id = self.spv.allocId();
const zero_id = try self.constInt(wip_result.ty, 0, .direct);
try self.func.body.emit(self.spv.gpa, .OpSLessThan, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = rhs_lt_zero_id,
.operand_1 = rhs_elem_id,
.operand_2 = zero_id,
@@ -3004,7 +2987,7 @@ const DeclGen = struct {
const value_gt_lhs_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, scmp, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = value_gt_lhs_id,
.operand_1 = lhs_elem_id,
.operand_2 = result_id.*,
@@ -3012,7 +2995,7 @@ const DeclGen = struct {
const overflowed_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalEqual, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = overflowed_id,
.operand_1 = rhs_lt_zero_id,
.operand_2 = value_gt_lhs_id,
@@ -3096,15 +3079,17 @@ const DeclGen = struct {
const result_ty = self.typeOfIndex(inst);
const operand_ty = self.typeOf(extra.lhs);
const shift_ty = self.typeOf(extra.rhs);
- const shift_ty_ref = try self.resolveType(shift_ty, .direct);
+ const scalar_shift_ty_id = try self.resolveType(shift_ty.scalarType(mod), .direct);
+ const scalar_operand_ty_id = try self.resolveType(operand_ty.scalarType(mod), .direct);
const ov_ty = result_ty.structFieldType(1, self.module);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
- const cmp_ty_ref = if (self.isVector(operand_ty))
- try self.spv.vectorType(operand_ty.vectorLen(mod), bool_ty_ref)
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
+ const cmp_ty_id = if (self.isVector(operand_ty))
+ // TODO: Resolving a vector type with .direct should return a SPIR-V vector
+ try self.spv.vectorType(operand_ty.vectorLen(mod), try self.resolveType(Type.bool, .direct))
else
- bool_ty_ref;
+ bool_ty_id;
const info = self.arithmeticTypeInfo(operand_ty);
switch (info.class) {
@@ -3123,7 +3108,7 @@ const DeclGen = struct {
// Sometimes Zig doesn't make both of the arguments the same type here. SPIR-V expects that,
// so just manually upcast it if required.
- const shift_id = if (shift_ty_ref != wip_result.ty_ref) blk: {
+ const shift_id = if (scalar_shift_ty_id != scalar_operand_ty_id) blk: {
const shift_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpUConvert, .{
.id_result_type = wip_result.ty_id,
@@ -3164,7 +3149,7 @@ const DeclGen = struct {
const overflowed_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
- .id_result_type = self.typeId(cmp_ty_ref),
+ .id_result_type = cmp_ty_id,
.id_result = overflowed_id,
.operand_1 = lhs_elem_id,
.operand_2 = right_shift_id,
@@ -3235,8 +3220,7 @@ const DeclGen = struct {
defer wip.deinit();
const elem_ty = if (wip.is_array) operand_ty.scalarType(mod) else operand_ty;
- const elem_ty_ref = try self.resolveType(elem_ty, .direct);
- const elem_ty_id = self.typeId(elem_ty_ref);
+ const elem_ty_id = try self.resolveType(elem_ty, .direct);
for (wip.results, 0..) |*result_id, i| {
const elem = try wip.elementAt(operand_ty, operand, i);
@@ -3261,6 +3245,8 @@ const DeclGen = struct {
.id_ref_4 = &.{elem},
});
+ // TODO: This comparison should be removed.
+ // It's valid because SpvModule caches numeric types.
if (wip.ty_id == elem_ty_id) {
result_id.* = tmp;
continue;
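The id comparison above is justified by SpvModule caching numeric types: resolving the same integer or float type twice yields the same result-id, so `==` on the ids doubles as a type-equality check. A sketch of the assumed invariant:

    // Assumed invariant: numeric types are memoized, so repeated resolution
    // returns the identical result-id.
    const a = try self.resolveType(Type.u32, .direct);
    const b = try self.resolveType(Type.u32, .direct);
    assert(a == b); // not guaranteed for uncached (e.g. aggregate) types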
@@ -3307,8 +3293,7 @@ const DeclGen = struct {
const operand = try self.resolve(reduce.operand);
const operand_ty = self.typeOf(reduce.operand);
const scalar_ty = operand_ty.scalarType(mod);
- const scalar_ty_ref = try self.resolveType(scalar_ty, .direct);
- const scalar_ty_id = self.typeId(scalar_ty_ref);
+ const scalar_ty_id = try self.resolveType(scalar_ty, .direct);
const info = self.arithmeticTypeInfo(operand_ty);
@@ -3408,13 +3393,13 @@ const DeclGen = struct {
fn accessChainId(
self: *DeclGen,
- result_ty_ref: CacheRef,
+ result_ty_id: IdRef,
base: IdRef,
indices: []const IdRef,
) !IdRef {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpInBoundsAccessChain, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.base = base,
.indexes = indices,
@@ -3428,18 +3413,18 @@ const DeclGen = struct {
/// is the latter and PtrAccessChain is the former.
fn accessChain(
self: *DeclGen,
- result_ty_ref: CacheRef,
+ result_ty_id: IdRef,
base: IdRef,
indices: []const u32,
) !IdRef {
const ids = try self.indicesToIds(indices);
defer self.gpa.free(ids);
- return try self.accessChainId(result_ty_ref, base, ids);
+ return try self.accessChainId(result_ty_id, base, ids);
}
fn ptrAccessChain(
self: *DeclGen,
- result_ty_ref: CacheRef,
+ result_ty_id: IdRef,
base: IdRef,
element: IdRef,
indices: []const u32,
@@ -3449,7 +3434,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpInBoundsPtrAccessChain, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.base = base,
.element = element,
@@ -3460,21 +3445,21 @@ const DeclGen = struct {
fn ptrAdd(self: *DeclGen, result_ty: Type, ptr_ty: Type, ptr_id: IdRef, offset_id: IdRef) !IdRef {
const mod = self.module;
- const result_ty_ref = try self.resolveType(result_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ty, .direct);
switch (ptr_ty.ptrSize(mod)) {
.One => {
// Pointer to array
// TODO: Is this correct?
- return try self.accessChainId(result_ty_ref, ptr_id, &.{offset_id});
+ return try self.accessChainId(result_ty_id, ptr_id, &.{offset_id});
},
.C, .Many => {
- return try self.ptrAccessChain(result_ty_ref, ptr_id, offset_id, &.{});
+ return try self.ptrAccessChain(result_ty_id, ptr_id, offset_id, &.{});
},
.Slice => {
// TODO: This is probably incorrect. A slice should be returned here, though this is what LLVM does.
const slice_ptr_id = try self.extractField(result_ty, ptr_id, 0);
- return try self.ptrAccessChain(result_ty_ref, slice_ptr_id, offset_id, &.{});
+ return try self.ptrAccessChain(result_ty_id, slice_ptr_id, offset_id, &.{});
},
}
}
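`ptrAdd` picks the chain instruction from the pointer size of the operand. Roughly, assuming the lowering in the switch above:

    fn offsets(arr: *[4]u32, many: [*]u32, slice: []u32) void {
        _ = &arr[1];    // .One: pointer-to-array, via accessChainId
        _ = many + 1;   // .Many / .C: via ptrAccessChain
        _ = slice[1..]; // .Slice: extract the ptr field, then ptrAccessChain
    }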
@@ -3497,12 +3482,12 @@ const DeclGen = struct {
const ptr_ty = self.typeOf(bin_op.lhs);
const offset_id = try self.resolve(bin_op.rhs);
const offset_ty = self.typeOf(bin_op.rhs);
- const offset_ty_ref = try self.resolveType(offset_ty, .direct);
+ const offset_ty_id = try self.resolveType(offset_ty, .direct);
const result_ty = self.typeOfIndex(inst);
const negative_offset_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpSNegate, .{
- .id_result_type = self.typeId(offset_ty_ref),
+ .id_result_type = offset_ty_id,
.id_result = negative_offset_id,
.operand = offset_id,
});
@@ -3520,7 +3505,7 @@ const DeclGen = struct {
const mod = self.module;
var cmp_lhs_id = lhs_id;
var cmp_rhs_id = rhs_id;
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
const op_ty = switch (ty.zigTypeTag(mod)) {
.Int, .Bool, .Float => ty,
.Enum => ty.intTagType(mod),
@@ -3532,7 +3517,7 @@ const DeclGen = struct {
cmp_lhs_id = self.spv.allocId();
cmp_rhs_id = self.spv.allocId();
- const usize_ty_id = try self.resolveType2(Type.usize, .direct);
+ const usize_ty_id = try self.resolveType(Type.usize, .direct);
try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
.id_result_type = usize_ty_id,
@@ -3594,20 +3579,20 @@ const DeclGen = struct {
const pl_eq_id = try self.cmp(op, Type.bool, payload_ty, lhs_pl_id, rhs_pl_id);
const lhs_not_valid_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = lhs_not_valid_id,
.operand = lhs_valid_id,
});
const impl_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = impl_id,
.operand_1 = lhs_not_valid_id,
.operand_2 = pl_eq_id,
});
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand_1 = valid_eq_id,
.operand_2 = impl_id,
@@ -3620,14 +3605,14 @@ const DeclGen = struct {
const impl_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalAnd, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = impl_id,
.operand_1 = lhs_valid_id,
.operand_2 = pl_neq_id,
});
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalOr, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand_1 = valid_neq_id,
.operand_2 = impl_id,
@@ -3695,7 +3680,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
try self.func.body.emitRaw(self.spv.gpa, opcode, 4);
- self.func.body.writeOperand(spec.IdResultType, self.typeId(bool_ty_ref));
+ self.func.body.writeOperand(spec.IdResultType, bool_ty_id);
self.func.body.writeOperand(spec.IdResult, result_id);
self.func.body.writeOperand(spec.IdResultType, cmp_lhs_id);
self.func.body.writeOperand(spec.IdResultType, cmp_rhs_id);
@@ -3728,6 +3713,7 @@ const DeclGen = struct {
return try self.cmp(op, result_ty, ty, lhs_id, rhs_id);
}
+ /// Bitcast one type to another. Note: both the input and output types are expected in **direct** representation.
fn bitCast(
self: *DeclGen,
dst_ty: Type,
@@ -3735,13 +3721,11 @@ const DeclGen = struct {
src_id: IdRef,
) !IdRef {
const mod = self.module;
- const src_ty_ref = try self.resolveType(src_ty, .direct);
- const dst_ty_ref = try self.resolveType(dst_ty, .direct);
- const src_key = self.spv.cache.lookup(src_ty_ref);
- const dst_key = self.spv.cache.lookup(dst_ty_ref);
+ const src_ty_id = try self.resolveType(src_ty, .direct);
+ const dst_ty_id = try self.resolveType(dst_ty, .direct);
const result_id = blk: {
- if (src_ty_ref == dst_ty_ref) {
+ if (src_ty_id == dst_ty_id) {
break :blk src_id;
}
@@ -3751,7 +3735,7 @@ const DeclGen = struct {
if (src_ty.zigTypeTag(mod) == .Int and dst_ty.isPtrAtRuntime(mod)) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
- .id_result_type = self.typeId(dst_ty_ref),
+ .id_result_type = dst_ty_id,
.id_result = result_id,
.integer_value = src_id,
});
@@ -3761,10 +3745,11 @@ const DeclGen = struct {
// We can only use OpBitcast for specific conversions: between numerical types, and
// between pointers. If the resolved SPIR-V types fall into this category then emit OpBitcast,
// otherwise use a temporary and perform a pointer cast.
- if ((src_key.isNumericalType() and dst_key.isNumericalType()) or (src_key == .ptr_type and dst_key == .ptr_type)) {
+ const can_bitcast = (src_ty.isNumeric(mod) and dst_ty.isNumeric(mod)) or (src_ty.isPtrAtRuntime(mod) and dst_ty.isPtrAtRuntime(mod));
+ if (can_bitcast) {
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(dst_ty_ref),
+ .id_result_type = dst_ty_id,
.id_result = result_id,
.operand = src_id,
});
@@ -3772,13 +3757,13 @@ const DeclGen = struct {
break :blk result_id;
}
- const dst_ptr_ty_ref = try self.ptrType(dst_ty, .Function);
+ const dst_ptr_ty_id = try self.ptrType(dst_ty, .Function);
const tmp_id = try self.alloc(src_ty, .{ .storage_class = .Function });
try self.store(src_ty, tmp_id, src_id, .{});
const casted_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(dst_ptr_ty_ref),
+ .id_result_type = dst_ptr_ty_id,
.id_result = casted_ptr_id,
.operand = tmp_id,
});
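OpBitcast is restricted to numeric-to-numeric and pointer-to-pointer conversions, so any other type pair takes the memory round-trip above: store to a Function-local temporary, bitcast the pointer, load back. A sketch of both paths (assumed, with the fallback shown as the emitted shape):

    fn toFloat(x: u32) f32 {
        return @bitCast(x); // numeric <-> numeric: a single OpBitcast
    }
    // Fallback for other type pairs, in emitted form:
    //   %tmp  = OpVariable %src_ptr_ty Function
    //           OpStore %tmp %src
    //   %cast = OpBitcast %dst_ptr_ty %tmp
    //   %res  = OpLoad %dst_ty %cast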
@@ -3850,7 +3835,7 @@ const DeclGen = struct {
}
fn intFromPtr(self: *DeclGen, operand_id: IdRef) !IdRef {
- const result_type_id = try self.resolveTypeId(Type.usize);
+ const result_type_id = try self.resolveType(Type.usize, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertPtrToU, .{
.id_result_type = result_type_id,
@@ -3871,21 +3856,21 @@ const DeclGen = struct {
const operand_ty = self.typeOf(ty_op.operand);
const operand_id = try self.resolve(ty_op.operand);
const result_ty = self.typeOfIndex(inst);
- const result_ty_ref = try self.resolveType(result_ty, .direct);
- return try self.floatFromInt(result_ty_ref, operand_ty, operand_id);
+ return try self.floatFromInt(result_ty, operand_ty, operand_id);
}
- fn floatFromInt(self: *DeclGen, result_ty_ref: CacheRef, operand_ty: Type, operand_id: IdRef) !IdRef {
+ fn floatFromInt(self: *DeclGen, result_ty: Type, operand_ty: Type, operand_id: IdRef) !IdRef {
const operand_info = self.arithmeticTypeInfo(operand_ty);
const result_id = self.spv.allocId();
+ const result_ty_id = try self.resolveType(result_ty, .direct);
switch (operand_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpConvertSToF, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.signed_value = operand_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertUToF, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.unsigned_value = operand_id,
}),
@@ -3902,16 +3887,16 @@ const DeclGen = struct {
fn intFromFloat(self: *DeclGen, result_ty: Type, operand_id: IdRef) !IdRef {
const result_info = self.arithmeticTypeInfo(result_ty);
- const result_ty_ref = try self.resolveType(result_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ty, .direct);
const result_id = self.spv.allocId();
switch (result_info.signedness) {
.signed => try self.func.body.emit(self.spv.gpa, .OpConvertFToS, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.float_value = operand_id,
}),
.unsigned => try self.func.body.emit(self.spv.gpa, .OpConvertFToU, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = result_id,
.float_value = operand_id,
}),
@@ -3937,7 +3922,7 @@ const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const dest_ty = self.typeOfIndex(inst);
- const dest_ty_id = try self.resolveTypeId(dest_ty);
+ const dest_ty_id = try self.resolveType(dest_ty, .direct);
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpFConvert, .{
@@ -3987,7 +3972,7 @@ const DeclGen = struct {
const slice_ty = self.typeOfIndex(inst);
const elem_ptr_ty = slice_ty.slicePtrFieldType(mod);
- const elem_ptr_ty_ref = try self.resolveType(elem_ptr_ty, .direct);
+ const elem_ptr_ty_id = try self.resolveType(elem_ptr_ty, .direct);
const array_ptr_id = try self.resolve(ty_op.operand);
const len_id = try self.constInt(Type.usize, array_ty.arrayLen(mod), .direct);
@@ -3997,7 +3982,7 @@ const DeclGen = struct {
try self.bitCast(elem_ptr_ty, array_ptr_ty, array_ptr_id)
else
// Convert the pointer-to-array to a pointer to the first element.
- try self.accessChain(elem_ptr_ty_ref, array_ptr_id, &.{0});
+ try self.accessChain(elem_ptr_ty_id, array_ptr_id, &.{0});
return try self.constructStruct(
slice_ty,
@@ -4171,10 +4156,10 @@ const DeclGen = struct {
const index_id = try self.resolve(bin_op.rhs);
const ptr_ty = self.typeOfIndex(inst);
- const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+ const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
- return try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
+ return try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
}
fn airSliceElemVal(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -4187,10 +4172,10 @@ const DeclGen = struct {
const index_id = try self.resolve(bin_op.rhs);
const ptr_ty = slice_ty.slicePtrFieldType(mod);
- const ptr_ty_ref = try self.resolveType(ptr_ty, .direct);
+ const ptr_ty_id = try self.resolveType(ptr_ty, .direct);
const slice_ptr = try self.extractField(ptr_ty, slice_id, 0);
- const elem_ptr = try self.ptrAccessChain(ptr_ty_ref, slice_ptr, index_id, &.{});
+ const elem_ptr = try self.ptrAccessChain(ptr_ty_id, slice_ptr, index_id, &.{});
return try self.load(slice_ty.childType(mod), elem_ptr, .{ .is_volatile = slice_ty.isVolatilePtr(mod) });
}
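Both slice accessors peel the element pointer out of the slice aggregate before chaining; in emitted terms, roughly:

    fn elem(slice: []const u32, i: usize) u32 {
        // %ptr  = OpCompositeExtract %slice 0      (extractField, field 0)
        // %elem = OpInBoundsPtrAccessChain %ptr %i (ptrAccessChain)
        // %val  = OpLoad %elem                     (volatility from the type)
        return slice[i];
    }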
@@ -4198,14 +4183,14 @@ const DeclGen = struct {
const mod = self.module;
// Construct new pointer type for the resulting pointer
const elem_ty = ptr_ty.elemType2(mod); // use elemType2() so that we get T for *[N]T.
- const elem_ptr_ty_ref = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, self.spvStorageClass(ptr_ty.ptrAddressSpace(mod)));
if (ptr_ty.isSinglePointer(mod)) {
// Pointer-to-array. In this case, the resulting pointer is not of the same type
// as the ptr_ty (we want a *T, not a *[N]T), and hence we need to use accessChain.
- return try self.accessChainId(elem_ptr_ty_ref, ptr_id, &.{index_id});
+ return try self.accessChainId(elem_ptr_ty_id, ptr_id, &.{index_id});
} else {
// Resulting pointer type is the same as the ptr_ty, so use ptrAccessChain
- return try self.ptrAccessChain(elem_ptr_ty_ref, ptr_id, index_id, &.{});
+ return try self.ptrAccessChain(elem_ptr_ty_id, ptr_id, index_id, &.{});
}
}
@@ -4238,11 +4223,11 @@ const DeclGen = struct {
// For now, just generate a temporary and use that.
// TODO: This backend probably should also use isByRef from LLVM...
- const elem_ptr_ty_ref = try self.ptrType(elem_ty, .Function);
+ const elem_ptr_ty_id = try self.ptrType(elem_ty, .Function);
const tmp_id = try self.alloc(array_ty, .{ .storage_class = .Function });
try self.store(array_ty, tmp_id, array_id, .{});
- const elem_ptr_id = try self.accessChainId(elem_ptr_ty_ref, tmp_id, &.{index_id});
+ const elem_ptr_id = try self.accessChainId(elem_ptr_ty_id, tmp_id, &.{index_id});
return try self.load(elem_ty, elem_ptr_id, .{});
}
@@ -4267,13 +4252,13 @@ const DeclGen = struct {
const scalar_ty = vector_ty.scalarType(mod);
const storage_class = self.spvStorageClass(vector_ptr_ty.ptrAddressSpace(mod));
- const scalar_ptr_ty_ref = try self.ptrType(scalar_ty, storage_class);
+ const scalar_ptr_ty_id = try self.ptrType(scalar_ty, storage_class);
const vector_ptr = try self.resolve(data.vector_ptr);
const index = try self.resolve(extra.lhs);
const operand = try self.resolve(extra.rhs);
- const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_ref, vector_ptr, &.{index});
+ const elem_ptr_id = try self.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
try self.store(scalar_ty, elem_ptr_id, operand, .{
.is_volatile = vector_ptr_ty.isVolatilePtr(mod),
});
@@ -4289,7 +4274,7 @@ const DeclGen = struct {
if (layout.tag_size == 0) return;
const tag_ty = un_ty.unionTagTypeSafety(mod).?;
- const tag_ptr_ty_ref = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
+ const tag_ptr_ty_id = try self.ptrType(tag_ty, self.spvStorageClass(un_ptr_ty.ptrAddressSpace(mod)));
const union_ptr_id = try self.resolve(bin_op.lhs);
const new_tag_id = try self.resolve(bin_op.rhs);
@@ -4297,7 +4282,7 @@ const DeclGen = struct {
if (!layout.has_payload) {
try self.store(tag_ty, union_ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
} else {
- const ptr_id = try self.accessChain(tag_ptr_ty_ref, union_ptr_id, &.{layout.tag_index});
+ const ptr_id = try self.accessChain(tag_ptr_ty_id, union_ptr_id, &.{layout.tag_index});
try self.store(tag_ty, ptr_id, new_tag_id, .{ .is_volatile = un_ptr_ty.isVolatilePtr(mod) });
}
}
@@ -4353,20 +4338,20 @@ const DeclGen = struct {
const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
if (layout.tag_size != 0) {
- const tag_ptr_ty_ref = try self.ptrType(tag_ty, .Function);
- const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
+ const tag_ptr_ty_id = try self.ptrType(tag_ty, .Function);
+ const ptr_id = try self.accessChain(tag_ptr_ty_id, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
const tag_id = try self.constInt(tag_ty, tag_int, .direct);
try self.store(tag_ty, ptr_id, tag_id, .{});
}
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
- const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
- const active_pl_ptr_ty_ref = try self.ptrType(payload_ty, .Function);
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
+ const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
+ const active_pl_ptr_ty_id = try self.ptrType(payload_ty, .Function);
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(active_pl_ptr_ty_ref),
+ .id_result_type = active_pl_ptr_ty_id,
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
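The union initialization assumes the layout computed above: a tag slot at `layout.tag_index` and a single payload slot of type `layout.payload_ty`, so storing a concrete field value needs a pointer bitcast from the slot type to the active field's pointer type first. A hypothetical Zig value that takes this path:

    const U = union(enum) { small: u8, big: u64 };
    fn init(x: u8) U {
        // The tag is stored through one access chain; the payload pointer is
        // bitcast from the payload-slot type to *u8 before the store.
        return .{ .small = x };
    }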
@@ -4425,13 +4410,13 @@ const DeclGen = struct {
const tmp_id = try self.alloc(object_ty, .{ .storage_class = .Function });
try self.store(object_ty, tmp_id, object_id, .{});
- const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, .Function);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, tmp_id, &.{layout.payload_index});
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, .Function);
+ const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, tmp_id, &.{layout.payload_index});
- const active_pl_ptr_ty_ref = try self.ptrType(field_ty, .Function);
+ const active_pl_ptr_ty_id = try self.ptrType(field_ty, .Function);
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(active_pl_ptr_ty_ref),
+ .id_result_type = active_pl_ptr_ty_id,
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
@@ -4448,7 +4433,7 @@ const DeclGen = struct {
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
const parent_ty = ty_pl.ty.toType().childType(mod);
- const res_ty = try self.resolveType(ty_pl.ty.toType(), .indirect);
+ const result_ty_id = try self.resolveType(ty_pl.ty.toType(), .indirect);
const field_ptr = try self.resolve(extra.field_ptr);
const field_ptr_int = try self.intFromPtr(field_ptr);
@@ -4463,7 +4448,7 @@ const DeclGen = struct {
const base_ptr = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpConvertUToPtr, .{
- .id_result_type = self.spv.resultId(res_ty),
+ .id_result_type = result_ty_id,
.id_result = base_ptr,
.integer_value = base_ptr_int,
});
@@ -4478,7 +4463,7 @@ const DeclGen = struct {
object_ptr: IdRef,
field_index: u32,
) !IdRef {
- const result_ty_ref = try self.resolveType(result_ptr_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ptr_ty, .direct);
const mod = self.module;
const object_ty = object_ptr_ty.childType(mod);
@@ -4486,7 +4471,7 @@ const DeclGen = struct {
.Struct => switch (object_ty.containerLayout(mod)) {
.@"packed" => unreachable, // TODO
else => {
- return try self.accessChain(result_ty_ref, object_ptr, &.{field_index});
+ return try self.accessChain(result_ty_id, object_ptr, &.{field_index});
},
},
.Union => switch (object_ty.containerLayout(mod)) {
@@ -4496,16 +4481,16 @@ const DeclGen = struct {
if (!layout.has_payload) {
// Asked to get a pointer to a zero-sized field. Just lower this
// to undefined; there is no reason to make it a valid pointer.
- return try self.spv.constUndef(self.typeId(result_ty_ref));
+ return try self.spv.constUndef(result_ty_id);
}
const storage_class = self.spvStorageClass(object_ptr_ty.ptrAddressSpace(mod));
- const pl_ptr_ty_ref = try self.ptrType(layout.payload_ty, storage_class);
- const pl_ptr_id = try self.accessChain(pl_ptr_ty_ref, object_ptr, &.{layout.payload_index});
+ const pl_ptr_ty_id = try self.ptrType(layout.payload_ty, storage_class);
+ const pl_ptr_id = try self.accessChain(pl_ptr_ty_id, object_ptr, &.{layout.payload_index});
const active_pl_ptr_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpBitcast, .{
- .id_result_type = self.typeId(result_ty_ref),
+ .id_result_type = result_ty_id,
.id_result = active_pl_ptr_id,
.operand = pl_ptr_id,
});
@@ -4533,7 +4518,7 @@ const DeclGen = struct {
};
// Allocate a function-local variable, with possible initializer.
- // This function returns a pointer to a variable of type `ty_ref`,
+ // This function returns a pointer to a variable of type `ty`,
// which is in the Generic address space. The variable is actually
// placed in the Function address space.
fn alloc(
@@ -4541,13 +4526,13 @@ const DeclGen = struct {
ty: Type,
options: AllocOptions,
) !IdRef {
- const ptr_fn_ty_ref = try self.ptrType(ty, .Function);
+ const ptr_fn_ty_id = try self.ptrType(ty, .Function);
// SPIR-V requires that OpVariable declarations for locals go into the first block, so we are just going to
// directly generate them into func.prologue instead of the body.
const var_id = self.spv.allocId();
try self.func.prologue.emit(self.spv.gpa, .OpVariable, .{
- .id_result_type = self.typeId(ptr_fn_ty_ref),
+ .id_result_type = ptr_fn_ty_id,
.id_result = var_id,
.storage_class = .Function,
.initializer = options.initializer,
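As the comment above notes, locals must sit at the top of the function's first block, so they are emitted into `func.prologue` regardless of where they occur in the source. For example (a sketch):

    fn count(cond: bool) u32 {
        var total: u32 = 0;
        if (cond) {
            var x: u32 = 1; // declared mid-body in Zig, but emitted as an
            x += 1;         // OpVariable in the function's first block
            total += x;
        }
        return total;
    }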
@@ -4560,9 +4545,9 @@ const DeclGen = struct {
switch (options.storage_class) {
.Generic => {
- const ptr_gn_ty_ref = try self.ptrType(ty, .Generic);
+ const ptr_gn_ty_id = try self.ptrType(ty, .Generic);
// Convert to a generic pointer
- return self.castToGeneric(self.typeId(ptr_gn_ty_ref), var_id);
+ return self.castToGeneric(ptr_gn_ty_id, var_id);
},
.Function => return var_id,
else => unreachable,
@@ -4590,9 +4575,9 @@ const DeclGen = struct {
assert(self.control_flow == .structured);
const result_id = self.spv.allocId();
- const block_id_ty_ref = try self.resolveType(Type.u32, .direct);
+ const block_id_ty_id = try self.resolveType(Type.u32, .direct);
try self.func.body.emitRaw(self.spv.gpa, .OpPhi, @intCast(2 + incoming.len * 2)); // result type + result + variable/parent...
- self.func.body.writeOperand(spec.IdResultType, self.typeId(block_id_ty_ref));
+ self.func.body.writeOperand(spec.IdResultType, block_id_ty_id);
self.func.body.writeOperand(spec.IdRef, result_id);
for (incoming) |incoming_block| {
@@ -4690,8 +4675,8 @@ const DeclGen = struct {
// Make sure that we are still in a block when exiting the function.
// TODO: Can we get rid of that?
try self.beginSpvBlock(self.spv.allocId());
- const block_id_ty_ref = try self.resolveType(Type.u32, .direct);
- return try self.spv.constUndef(self.typeId(block_id_ty_ref));
+ const block_id_ty_id = try self.resolveType(Type.u32, .direct);
+ return try self.spv.constUndef(block_id_ty_id);
}
// The top-most merge actually only has a single source, the
@@ -4772,7 +4757,7 @@ const DeclGen = struct {
assert(block.label != null);
const result_id = self.spv.allocId();
- const result_type_id = try self.resolveTypeId(ty);
+ const result_type_id = try self.resolveType(ty, .direct);
try self.func.body.emitRaw(
self.spv.gpa,
@@ -4810,9 +4795,9 @@ const DeclGen = struct {
// Check if the target of the branch was this current block.
const this_block = try self.constInt(Type.u32, @intFromEnum(inst), .direct);
const jump_to_this_block_id = self.spv.allocId();
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
try self.func.body.emit(self.spv.gpa, .OpIEqual, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = jump_to_this_block_id,
.operand_1 = next_block,
.operand_2 = this_block,
@@ -5099,7 +5084,7 @@ const DeclGen = struct {
const err_union_ty = self.typeOf(pl_op.operand);
const payload_ty = self.typeOfIndex(inst);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
const eu_layout = self.errorUnionLayout(payload_ty);
@@ -5112,7 +5097,7 @@ const DeclGen = struct {
const zero_id = try self.constInt(Type.anyerror, 0, .direct);
const is_err_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpINotEqual, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = is_err_id,
.operand_1 = err_id,
.operand_2 = zero_id,
@@ -5164,11 +5149,11 @@ const DeclGen = struct {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
const operand_id = try self.resolve(ty_op.operand);
const err_union_ty = self.typeOf(ty_op.operand);
- const err_ty_ref = try self.resolveType(Type.anyerror, .direct);
+ const err_ty_id = try self.resolveType(Type.anyerror, .direct);
if (err_union_ty.errorUnionSet(mod).errorSetIsEmpty(mod)) {
// No error possible, so just return undefined.
- return try self.spv.constUndef(self.typeId(err_ty_ref));
+ return try self.spv.constUndef(err_ty_id);
}
const payload_ty = err_union_ty.errorUnionPayload(mod);
@@ -5207,11 +5192,11 @@ const DeclGen = struct {
return operand_id;
}
- const payload_ty_ref = try self.resolveType(payload_ty, .indirect);
+ const payload_ty_id = try self.resolveType(payload_ty, .indirect);
var members: [2]IdRef = undefined;
members[eu_layout.errorFieldIndex()] = operand_id;
- members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(self.typeId(payload_ty_ref));
+ members[eu_layout.payloadFieldIndex()] = try self.spv.constUndef(payload_ty_id);
var types: [2]Type = undefined;
types[eu_layout.errorFieldIndex()] = Type.anyerror;
@@ -5250,7 +5235,7 @@ const DeclGen = struct {
const optional_ty = if (is_pointer) operand_ty.childType(mod) else operand_ty;
const payload_ty = optional_ty.optionalChild(mod);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
if (optional_ty.optionalReprIsPayload(mod)) {
// Pointer payload represents nullability: pointer or slice.
@@ -5269,7 +5254,7 @@ const DeclGen = struct {
else
loaded_id;
- const payload_ty_id = try self.resolveType2(ptr_ty, .direct);
+ const payload_ty_id = try self.resolveType(ptr_ty, .direct);
const null_id = try self.spv.constNull(payload_ty_id);
const op: std.math.CompareOperator = switch (pred) {
.is_null => .eq,
@@ -5282,8 +5267,8 @@ const DeclGen = struct {
if (is_pointer) {
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
const storage_class = self.spvStorageClass(operand_ty.ptrAddressSpace(mod));
- const bool_ptr_ty = try self.ptrType(Type.bool, storage_class);
- const tag_ptr_id = try self.accessChain(bool_ptr_ty, operand_id, &.{1});
+ const bool_ptr_ty_id = try self.ptrType(Type.bool, storage_class);
+ const tag_ptr_id = try self.accessChain(bool_ptr_ty_id, operand_id, &.{1});
break :blk try self.load(Type.bool, tag_ptr_id, .{});
}
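The branches above reflect the two optional representations this backend appears to use: pointer-like optionals are the pointer itself (null is the null pointer), while other optionals carry an explicit flag that is read through an access chain at field index 1. A hypothetical indirect layout:

    // Hypothetical shape of a non-pointer optional in indirect representation:
    const OptionalU32 = extern struct {
        payload: u32,      // field 0
        is_non_null: bool, // field 1: target of accessChain(.., &.{1})
    };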
@@ -5304,7 +5289,7 @@ const DeclGen = struct {
// Invert condition
const result_id = self.spv.allocId();
try self.func.body.emit(self.spv.gpa, .OpLogicalNot, .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand = is_non_null_id,
});
@@ -5326,7 +5311,7 @@ const DeclGen = struct {
const payload_ty = err_union_ty.errorUnionPayload(mod);
const eu_layout = self.errorUnionLayout(payload_ty);
- const bool_ty_ref = try self.resolveType(Type.bool, .direct);
+ const bool_ty_id = try self.resolveType(Type.bool, .direct);
const error_id = if (!eu_layout.payload_has_bits)
operand_id
@@ -5335,7 +5320,7 @@ const DeclGen = struct {
const result_id = self.spv.allocId();
const operands = .{
- .id_result_type = self.typeId(bool_ty_ref),
+ .id_result_type = bool_ty_id,
.id_result = result_id,
.operand_1 = error_id,
.operand_2 = try self.constInt(Type.anyerror, 0, .direct),
@@ -5371,7 +5356,7 @@ const DeclGen = struct {
const optional_ty = operand_ty.childType(mod);
const payload_ty = optional_ty.optionalChild(mod);
const result_ty = self.typeOfIndex(inst);
- const result_ty_ref = try self.resolveType(result_ty, .direct);
+ const result_ty_id = try self.resolveType(result_ty, .direct);
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
// There is no payload, but we still need to return a valid pointer.
@@ -5384,7 +5369,7 @@ const DeclGen = struct {
return try self.bitCast(result_ty, operand_ty, operand_id);
}
- return try self.accessChain(result_ty_ref, operand_id, &.{0});
+ return try self.accessChain(result_ty_id, operand_id, &.{0});
}
fn airWrapOptional(self: *DeclGen, inst: Air.Inst.Index) !?IdRef {
@@ -5586,9 +5571,8 @@ const DeclGen = struct {
const mod = self.module;
const decl = mod.declPtr(self.decl_index);
const path = decl.getFileScope(mod).sub_file_path;
- const src_fname_id = try self.spv.resolveSourceFileName(path);
try self.func.body.emit(self.spv.gpa, .OpLine, .{
- .file = src_fname_id,
+ .file = try self.spv.resolveString(path),
.line = self.base_line + dbg_stmt.line + 1,
.column = dbg_stmt.column + 1,
});
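With `resolveString`, each unique file path should map to a single OpString id that every subsequent OpLine reuses, and the `+ 1` suggests 0-based AIR positions being converted to the 1-based ones debuggers expect. Roughly, in emitted form (path illustrative):

    // %file = OpString "src/main.zig"   (cached by resolveString)
    // OpLine %file 12 5                 (1-based line and column)
    // OpLine %file 14 9                 (reuses %file; no second OpString)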
@@ -5757,7 +5741,7 @@ const DeclGen = struct {
const fn_info = mod.typeToFunc(zig_fn_ty).?;
const return_type = fn_info.return_type;
- const result_type_ref = try self.resolveFnReturnType(Type.fromInterned(return_type));
+ const result_type_id = try self.resolveFnReturnType(Type.fromInterned(return_type));
const result_id = self.spv.allocId();
const callee_id = try self.resolve(pl_op.operand);
@@ -5778,7 +5762,7 @@ const DeclGen = struct {
}
try self.func.body.emit(self.spv.gpa, .OpFunctionCall, .{
- .id_result_type = self.typeId(result_type_ref),
+ .id_result_type = result_type_id,
.id_result = result_id,
.function = callee_id,
.id_ref_3 = params[0..n_params],
src/link/SpirV/BinaryModule.zig
@@ -116,7 +116,8 @@ pub const Instruction = struct {
const instruction_len = self.words[self.offset] >> 16;
defer self.offset += instruction_len;
defer self.index += 1;
- assert(instruction_len != 0 and self.offset < self.words.len); // Verified in BinaryModule.parse.
+ assert(instruction_len != 0);
+ assert(self.offset < self.words.len);
return Instruction{
.opcode = @enumFromInt(self.words[self.offset] & 0xFFFF),