master
1//! This structure represents a SPIR-V (sections) module being compiled, and keeps
2//! track of all relevant information. That includes the actual instructions, the
3//! current result-id bound, and data structures for querying result-id's of data
4//! which needs to be persistent over different calls to Decl code generation.
5//!
6//! A SPIR-V binary module supports both little- and big endian layout. The layout
7//! is detected by the magic word in the header. Therefore, we can ignore any byte
8//! order throughout the implementation, and just use the host byte order, and make
9//! this a problem for the consumer.
10const std = @import("std");
11const Allocator = std.mem.Allocator;
12const assert = std.debug.assert;
13
14const Zcu = @import("../../Zcu.zig");
15const InternPool = @import("../../InternPool.zig");
16const Section = @import("Section.zig");
17const spec = @import("spec.zig");
18const Word = spec.Word;
19const Id = spec.Id;
20
21const Module = @This();
22
/// General-purpose allocator; owns all hash maps, sections and lists below.
gpa: Allocator,
/// Arena for data that must live as long as the module itself, e.g. duplicated
/// type name strings and field/parameter id lists stored in the caches.
arena: Allocator,
zcu: *Zcu,
/// Links Zig `Nav`s (named values) to the SPIR-V decl generated for them.
nav_link: std.AutoHashMapUnmanaged(InternPool.Nav.Index, Decl.Index) = .empty,
/// Links anonymous values (keyed by value and storage class) to their SPIR-V decl.
uav_link: std.AutoHashMapUnmanaged(struct { InternPool.Index, spec.StorageClass }, Decl.Index) = .empty,
/// Caches result-ids of lowered InternPool values, per representation.
intern_map: std.AutoHashMapUnmanaged(struct { InternPool.Index, Repr }, Id) = .empty,
/// All declarations known to this module; indexed by `Decl.Index`.
decls: std.ArrayList(Decl) = .empty,
/// Flat dependency storage; each decl owns the slice `[begin_dep..end_dep]`.
decl_deps: std.ArrayList(Decl.Index) = .empty,
/// Entry points to be exported, keyed by the result-id of the exported function.
entry_points: std.AutoArrayHashMapUnmanaged(Id, EntryPoint) = .empty,
/// This map serves a dual purpose:
/// - It keeps track of pointers that are currently being emitted, so that we can tell
///   if they are recursive and need an OpTypeForwardPointer.
/// - It caches pointers by child-type. This is required because sometimes we rely on
///   ID-equality for pointers, and pointers constructed via `ptrType()` aren't interned
///   via the usual `intern_map` mechanism.
ptr_types: std.AutoHashMapUnmanaged(struct { Id, spec.StorageClass }, Id) = .empty,
/// For test declarations compiled for Vulkan target, we have to add a buffer.
/// We only need to generate this once, this holds the link information related to that.
error_buffer: ?Decl.Index = null,
/// SPIR-V instructions return result-ids.
/// This variable holds the module-wide counter for these.
next_result_id: Word = 1,
/// Some types shouldn't be emitted more than one time, but cannot be caught by
/// the `intern_map` during codegen. Sometimes, IDs are compared to check if
/// types are the same, so we can't delay until the dedup pass. Therefore,
/// this is an ad-hoc structure to cache types where required.
/// According to the SPIR-V specification, section 2.8, this includes all non-aggregate
/// non-pointer types.
/// Additionally, this is used for other values which can be cached, for example,
/// built-in variables.
cache: struct {
    bool_type: ?Id = null,
    void_type: ?Id = null,
    opaque_types: std.StringHashMapUnmanaged(Id) = .empty,
    int_types: std.AutoHashMapUnmanaged(std.builtin.Type.Int, Id) = .empty,
    float_types: std.AutoHashMapUnmanaged(std.builtin.Type.Float, Id) = .empty,
    vector_types: std.AutoHashMapUnmanaged(struct { Id, u32 }, Id) = .empty,
    array_types: std.AutoHashMapUnmanaged(struct { Id, Id }, Id) = .empty,
    struct_types: std.ArrayHashMapUnmanaged(StructType, Id, StructType.HashContext, true) = .empty,
    fn_types: std.ArrayHashMapUnmanaged(FnType, Id, FnType.HashContext, true) = .empty,

    capabilities: std.AutoHashMapUnmanaged(spec.Capability, void) = .empty,
    extensions: std.StringHashMapUnmanaged(void) = .empty,
    extended_instruction_set: std.AutoHashMapUnmanaged(spec.InstructionSet, Id) = .empty,
    decorations: std.AutoHashMapUnmanaged(struct { Id, spec.Decoration }, void) = .empty,
    builtins: std.AutoHashMapUnmanaged(struct { spec.BuiltIn, spec.StorageClass }, Decl.Index) = .empty,
    strings: std.StringArrayHashMapUnmanaged(Id) = .empty,

    // Indexed by @intFromBool(value).
    bool_const: [2]?Id = .{ null, null },
    constants: std.ArrayHashMapUnmanaged(Constant, Id, Constant.HashContext, true) = .empty,
} = .{},
/// Module layout, according to SPIR-V Spec section 2.4, "Logical Layout of a Module".
sections: struct {
    capabilities: Section = .{},
    extensions: Section = .{},
    extended_instruction_set: Section = .{},
    memory_model: Section = .{},
    execution_modes: Section = .{},
    debug_strings: Section = .{},
    debug_names: Section = .{},
    annotations: Section = .{},
    globals: Section = .{},
    functions: Section = .{},
} = .{},
87
/// Bit width of each word ("limb") in the array representation used for
/// integers wider than any natively supported width (see `backingIntBits`).
pub const big_int_bits = 32;

/// Data can be lowered into in two basic representations: indirect, which is when
/// a type is stored in memory, and direct, which is how a type is stored when it's
/// a direct SPIR-V value.
pub const Repr = enum {
    /// A SPIR-V value as it would be used in operations.
    direct,
    /// A SPIR-V value as it is stored in memory.
    indirect,
};
99
/// Declarations, both functions and globals, can have dependencies. These are used for 2 things:
/// - Globals must be declared before they are used, also between globals. The compiler processes
///   globals unordered, so we must use the dependencies here to figure out how to order the globals
///   in the final module. The Globals structure is also used for that.
/// - Entry points must declare the complete list of OpVariable instructions that they access.
///   For these we use the same dependency structure.
/// In this mechanism, globals will only depend on other globals, while functions may depend on
/// globals or other functions.
pub const Decl = struct {
    /// Index to refer to a Decl by.
    pub const Index = enum(u32) { _ };

    /// Useful to tell what kind of decl this is, and hold the result-id or field index
    /// to be used for this decl.
    pub const Kind = enum {
        /// A function (OpFunction).
        func,
        /// A global variable (OpVariable).
        global,
        /// A global in the `generic` address space, which must be materialized
        /// per-invocation (see `resolveNav`).
        invocation_global,
    };

    /// See comment on Kind
    kind: Kind,
    /// The result-id associated to this decl. The specific meaning of this depends on `kind`:
    /// - For `func`, this is the result-id of the associated OpFunction instruction.
    /// - For `global`, this is the result-id of the associated OpVariable instruction.
    /// - For `invocation_global`, this is the result-id of the associated InvocationGlobal instruction.
    result_id: Id,
    /// The offset of the first dependency of this decl in the `decl_deps` array.
    begin_dep: usize = 0,
    /// The past-end offset of the dependencies of this decl in the `decl_deps` array.
    end_dep: usize = 0,
};
132
/// This models a kernel entry point.
pub const EntryPoint = struct {
    /// The declaration that should be exported.
    decl_index: Decl.Index,
    /// The name of the kernel to be exported.
    name: []const u8,
    /// Calling Convention
    exec_model: spec.ExecutionModel,
    /// Optional execution mode; may also have been set by the assembler
    /// before `declareEntryPoint` runs (see `declareEntryPoint`).
    exec_mode: ?spec.ExecutionMode = null,
};
143
/// Cache key for struct types: the field type ids plus the originating
/// InternPool index (or `.none` when stripping, see `structType`).
const StructType = struct {
    fields: []const Id,
    ip_index: InternPool.Index,

    /// Hash context for `cache.struct_types`; hashes both the field ids and
    /// the InternPool index so distinct Zig types never unify.
    const HashContext = struct {
        pub fn hash(_: @This(), ty: StructType) u32 {
            var hasher = std.hash.Wyhash.init(0);
            hasher.update(std.mem.sliceAsBytes(ty.fields));
            hasher.update(std.mem.asBytes(&ty.ip_index));
            return @truncate(hasher.final());
        }

        pub fn eql(_: @This(), a: StructType, b: StructType, _: usize) bool {
            return a.ip_index == b.ip_index and std.mem.eql(Id, a.fields, b.fields);
        }
    };
};
161
/// Cache key for function types: return type id plus parameter type ids.
const FnType = struct {
    return_ty: Id,
    params: []const Id,

    /// Hash context for `cache.fn_types`.
    const HashContext = struct {
        pub fn hash(_: @This(), ty: FnType) u32 {
            var h = std.hash.Wyhash.init(0);
            h.update(std.mem.asBytes(&ty.return_ty));
            h.update(std.mem.sliceAsBytes(ty.params));
            return @truncate(h.final());
        }

        pub fn eql(_: @This(), a: FnType, b: FnType, _: usize) bool {
            if (a.return_ty != b.return_ty) return false;
            return std.mem.eql(Id, a.params, b.params);
        }
    };
};
180
/// Cache key for OpConstant instructions: the type id plus the literal value.
const Constant = struct {
    ty: Id,
    value: spec.LiteralContextDependentNumber,

    /// Hash context for `cache.constants`. The value is a union, so both the
    /// active tag and the payload bytes must participate in hash and equality.
    const HashContext = struct {
        pub fn hash(_: @This(), value: Constant) u32 {
            const Tag = @typeInfo(spec.LiteralContextDependentNumber).@"union".tag_type.?;
            var hasher = std.hash.Wyhash.init(0);
            hasher.update(std.mem.asBytes(&value.ty));
            // Hash the tag explicitly; payloads of different variants may have
            // identical bit patterns.
            hasher.update(std.mem.asBytes(&@as(Tag, value.value)));
            switch (value.value) {
                inline else => |v| hasher.update(std.mem.asBytes(&v)),
            }
            return @truncate(hasher.final());
        }

        pub fn eql(_: @This(), a: Constant, b: Constant, _: usize) bool {
            if (a.ty != b.ty) return false;
            const Tag = @typeInfo(spec.LiteralContextDependentNumber).@"union".tag_type.?;
            if (@as(Tag, a.value) != @as(Tag, b.value)) return false;
            // Tags match; compare the active payloads.
            return switch (a.value) {
                inline else => |v, tag| v == @field(b.value, @tagName(tag)),
            };
        }
    };
};
207
/// Release every resource owned by the module and poison it.
/// Deallocation cannot fail, so this returns `void`.
pub fn deinit(module: *Module) void {
    const gpa = module.gpa;

    module.nav_link.deinit(gpa);
    module.uav_link.deinit(gpa);
    module.intern_map.deinit(gpa);
    module.ptr_types.deinit(gpa);

    module.sections.capabilities.deinit(gpa);
    module.sections.extensions.deinit(gpa);
    module.sections.extended_instruction_set.deinit(gpa);
    module.sections.memory_model.deinit(gpa);
    module.sections.execution_modes.deinit(gpa);
    module.sections.debug_strings.deinit(gpa);
    module.sections.debug_names.deinit(gpa);
    module.sections.annotations.deinit(gpa);
    module.sections.globals.deinit(gpa);
    module.sections.functions.deinit(gpa);

    module.cache.opaque_types.deinit(gpa);
    module.cache.int_types.deinit(gpa);
    module.cache.float_types.deinit(gpa);
    module.cache.vector_types.deinit(gpa);
    module.cache.array_types.deinit(gpa);
    module.cache.struct_types.deinit(gpa);
    module.cache.fn_types.deinit(gpa);
    module.cache.capabilities.deinit(gpa);
    module.cache.extensions.deinit(gpa);
    module.cache.extended_instruction_set.deinit(gpa);
    module.cache.decorations.deinit(gpa);
    module.cache.builtins.deinit(gpa);
    module.cache.strings.deinit(gpa);
    module.cache.constants.deinit(gpa);

    module.decls.deinit(gpa);
    module.decl_deps.deinit(gpa);
    module.entry_points.deinit(gpa);

    module.* = undefined;
}
247
/// Fetch or allocate a result id for nav index. This function also marks the nav as alive.
/// Note: Function does not actually generate the nav, it just allocates an index.
pub fn resolveNav(module: *Module, ip: *InternPool, nav_index: InternPool.Nav.Index) !Decl.Index {
    const entry = try module.nav_link.getOrPut(module.gpa, nav_index);
    if (!entry.found_existing) {
        const nav = ip.getNav(nav_index);
        // TODO: Extern fn?
        // Classify the decl: functions become `.func`; globals in the generic
        // address space become `.invocation_global`, all other globals `.global`.
        const kind: Decl.Kind = if (ip.isFunctionType(nav.typeOf(ip)))
            .func
        else switch (nav.getAddrspace()) {
            .generic => .invocation_global,
            else => .global,
        };
        entry.value_ptr.* = try module.allocDecl(kind);
    }

    return entry.value_ptr.*;
}
266
/// Reserve `n` consecutive result-ids and return the reserved range.
pub fn allocIds(module: *Module, n: u32) spec.IdRange {
    const base = module.next_result_id;
    module.next_result_id += n;
    return .{ .base = base, .len = n };
}
271
/// Reserve a single fresh result-id.
pub fn allocId(module: *Module) Id {
    const range = module.allocIds(1);
    return range.at(0);
}
275
/// The id bound for the module header: one past the largest result-id in use.
pub fn idBound(module: Module) Word {
    return module.next_result_id;
}
279
/// Recursively collect the OpVariable result-ids (decls of kind `.global`)
/// reachable from `decl_index` into `interface`. `seen` guards against
/// visiting a decl twice (and against dependency cycles).
pub fn addEntryPointDeps(
    module: *Module,
    decl_index: Decl.Index,
    seen: *std.DynamicBitSetUnmanaged,
    interface: *std.array_list.Managed(Id),
) !void {
    const index = @intFromEnum(decl_index);
    if (seen.isSet(index)) return;
    seen.set(index);

    const decl = module.declPtr(decl_index);
    if (decl.kind == .global) {
        try interface.append(decl.result_id);
    }

    for (module.decl_deps.items[decl.begin_dep..decl.end_dep]) |dep| {
        try module.addEntryPointDeps(dep, seen, interface);
    }
}
303
/// Build the OpEntryPoint section for all declared entry points.
/// Each OpEntryPoint must list every global variable the entry point
/// (transitively) accesses, which is gathered via `addEntryPointDeps`.
/// Also emits a default OpExecutionMode for fragment shaders that have none.
/// Caller owns the returned section.
fn entryPoints(module: *Module) !Section {
    const target = module.zcu.getTarget();

    var entry_points = Section{};
    errdefer entry_points.deinit(module.gpa);

    // Scratch buffers, reset and reused for every entry point.
    var interface = std.array_list.Managed(Id).init(module.gpa);
    defer interface.deinit();

    var seen = try std.DynamicBitSetUnmanaged.initEmpty(module.gpa, module.decls.items.len);
    defer seen.deinit(module.gpa);

    for (module.entry_points.keys(), module.entry_points.values()) |entry_point_id, entry_point| {
        interface.items.len = 0;
        seen.setRangeValue(.{ .start = 0, .end = module.decls.items.len }, false);

        try module.addEntryPointDeps(entry_point.decl_index, &seen, &interface);
        try entry_points.emit(module.gpa, .OpEntryPoint, .{
            .execution_model = entry_point.exec_model,
            .entry_point = entry_point_id,
            .name = entry_point.name,
            .interface = interface.items,
        });

        // Fragment entry points without an explicit execution mode get the
        // API's conventional origin (Vulkan: upper-left, OpenGL: lower-left).
        if (entry_point.exec_mode == null and entry_point.exec_model == .fragment) {
            switch (target.os.tag) {
                .vulkan, .opengl => |tag| {
                    try module.sections.execution_modes.emit(module.gpa, .OpExecutionMode, .{
                        .entry_point = entry_point_id,
                        .mode = if (tag == .vulkan) .origin_upper_left else .origin_lower_left,
                    });
                },
                .opencl => {},
                else => unreachable,
            }
        }
    }

    return entry_points;
}
344
/// Assemble the final SPIR-V binary: emit target-dependent capabilities,
/// extensions and the memory model, then concatenate the header and all
/// sections in the order required by SPIR-V spec section 2.3.
/// Returns a caller-owned slice of words allocated with `gpa`.
pub fn finalize(module: *Module, gpa: Allocator) ![]Word {
    const target = module.zcu.getTarget();

    // Emit capabilities and extensions
    switch (target.os.tag) {
        .opengl => {
            try module.addCapability(.shader);
            try module.addCapability(.matrix);
        },
        .vulkan => {
            try module.addCapability(.shader);
            try module.addCapability(.matrix);
            if (target.cpu.arch == .spirv64) {
                try module.addExtension("SPV_KHR_physical_storage_buffer");
                try module.addCapability(.physical_storage_buffer_addresses);
            }
        },
        .opencl, .amdhsa => {
            try module.addCapability(.kernel);
            try module.addCapability(.addresses);
        },
        else => unreachable,
    }
    // addCapability deduplicates, so .int64 possibly being added twice here is harmless.
    if (target.cpu.arch == .spirv64) try module.addCapability(.int64);
    if (target.cpu.has(.spirv, .int64)) try module.addCapability(.int64);
    if (target.cpu.has(.spirv, .float16)) {
        if (target.os.tag == .opencl) try module.addExtension("cl_khr_fp16");
        try module.addCapability(.float16);
    }
    if (target.cpu.has(.spirv, .float64)) try module.addCapability(.float64);
    if (target.cpu.has(.spirv, .generic_pointer)) try module.addCapability(.generic_pointer);
    if (target.cpu.has(.spirv, .vector16)) try module.addCapability(.vector16);
    if (target.cpu.has(.spirv, .storage_push_constant16)) {
        try module.addExtension("SPV_KHR_16bit_storage");
        try module.addCapability(.storage_push_constant16);
    }
    if (target.cpu.has(.spirv, .arbitrary_precision_integers)) {
        try module.addExtension("SPV_INTEL_arbitrary_precision_integers");
        try module.addCapability(.arbitrary_precision_integers_intel);
    }
    if (target.cpu.has(.spirv, .variable_pointers)) {
        try module.addExtension("SPV_KHR_variable_pointers");
        try module.addCapability(.variable_pointers_storage_buffer);
        try module.addCapability(.variable_pointers);
    }
    // These are well supported
    try module.addCapability(.int8);
    try module.addCapability(.int16);

    // Emit memory model
    const addressing_model: spec.AddressingModel = switch (target.os.tag) {
        .opengl => .logical,
        .vulkan => if (target.cpu.arch == .spirv32) .logical else .physical_storage_buffer64,
        .opencl => if (target.cpu.arch == .spirv32) .physical32 else .physical64,
        .amdhsa => .physical64,
        else => unreachable,
    };
    try module.sections.memory_model.emit(module.gpa, .OpMemoryModel, .{
        .addressing_model = addressing_model,
        .memory_model = switch (target.os.tag) {
            .opencl => .open_cl,
            .vulkan, .opengl => .glsl450,
            else => unreachable,
        },
    });

    var entry_points = try module.entryPoints();
    defer entry_points.deinit(module.gpa);

    const version: spec.Version = .{
        .major = 1,
        .minor = blk: {
            // Prefer higher versions
            if (target.cpu.has(.spirv, .v1_6)) break :blk 6;
            if (target.cpu.has(.spirv, .v1_5)) break :blk 5;
            if (target.cpu.has(.spirv, .v1_4)) break :blk 4;
            if (target.cpu.has(.spirv, .v1_3)) break :blk 3;
            if (target.cpu.has(.spirv, .v1_2)) break :blk 2;
            if (target.cpu.has(.spirv, .v1_1)) break :blk 1;
            break :blk 0;
        },
    };

    // Pack the Zig version into the low 16 bits of the generator magic number.
    const zig_version = @import("builtin").zig_version;
    const zig_spirv_compiler_version = comptime (zig_version.major << 12) | (zig_version.minor << 7) | zig_version.patch;

    // A SPIR-V Generator Magic Number is a 32 bit word: The high order 16
    // bits are a tool ID, which should be unique across all SPIR-V
    // generators. The low order 16 bits are reserved for use as a tool
    // version number, or any other purpose the tool supplier chooses.
    // Only the tool IDs are reserved with Khronos.
    // See https://github.com/KhronosGroup/SPIRV-Headers/blob/f2e4bd213104fe323a01e935df56557328d37ac8/include/spirv/spir-v.xml#L17C5-L21C54
    const generator_id: u32 = (spec.zig_generator_id << 16) | zig_spirv_compiler_version;

    const header = [_]Word{
        spec.magic_number,
        version.toWord(),
        generator_id,
        module.idBound(),
        0, // Schema (currently reserved for future use)
    };

    var source = Section{};
    defer source.deinit(module.gpa);
    try module.sections.debug_strings.emit(module.gpa, .OpSource, .{
        .source_language = .zig,
        .version = zig_spirv_compiler_version,
        // We cannot emit these because the Khronos translator does not parse this instruction
        // correctly.
        // See https://github.com/KhronosGroup/SPIRV-LLVM-Translator/issues/2188
        .file = null,
        .source = null,
    });

    // Note: needs to be kept in order according to section 2.3!
    const buffers = &[_][]const Word{
        &header,
        module.sections.capabilities.toWords(),
        module.sections.extensions.toWords(),
        module.sections.extended_instruction_set.toWords(),
        module.sections.memory_model.toWords(),
        entry_points.toWords(),
        module.sections.execution_modes.toWords(),
        source.toWords(),
        module.sections.debug_strings.toWords(),
        module.sections.debug_names.toWords(),
        module.sections.annotations.toWords(),
        module.sections.globals.toWords(),
        module.sections.functions.toWords(),
    };

    var total_result_size: usize = 0;
    for (buffers) |buffer| {
        total_result_size += buffer.len;
    }
    const result = try gpa.alloc(Word, total_result_size);
    // No fallible operation occurs after this allocation.
    errdefer comptime unreachable;

    var offset: usize = 0;
    for (buffers) |buffer| {
        @memcpy(result[offset..][0..buffer.len], buffer);
        offset += buffer.len;
    }

    return result;
}
491
/// Emit an OpCapability instruction, at most once per capability.
pub fn addCapability(module: *Module, cap: spec.Capability) !void {
    const gop = try module.cache.capabilities.getOrPut(module.gpa, cap);
    if (!gop.found_existing) {
        try module.sections.capabilities.emit(module.gpa, .OpCapability, .{ .capability = cap });
    }
}
497
/// Emit an OpExtension instruction, at most once per extension name.
/// `ext` is not copied; it must outlive the module (callers pass literals).
pub fn addExtension(module: *Module, ext: []const u8) !void {
    const gop = try module.cache.extensions.getOrPut(module.gpa, ext);
    if (!gop.found_existing) {
        try module.sections.extensions.emit(module.gpa, .OpExtension, .{ .name = ext });
    }
}
503
/// Imports or returns the existing id of an extended instruction set
pub fn importInstructionSet(module: *Module, set: spec.InstructionSet) !Id {
    assert(set != .core);

    const entry = try module.cache.extended_instruction_set.getOrPut(module.gpa, set);
    if (!entry.found_existing) {
        const id = module.allocId();
        try module.sections.extended_instruction_set.emit(module.gpa, .OpExtInstImport, .{
            .id_result = id,
            .name = @tagName(set),
        });
        entry.value_ptr.* = id;
    }
    return entry.value_ptr.*;
}
520
/// Get or create the (unique) OpTypeBool.
pub fn boolType(module: *Module) !Id {
    if (module.cache.bool_type) |existing| return existing;

    const id = module.allocId();
    try module.sections.globals.emit(module.gpa, .OpTypeBool, .{ .id_result = id });
    module.cache.bool_type = id;
    return id;
}
531
/// Get or create the (unique) OpTypeVoid, named "void" in debug info.
pub fn voidType(module: *Module) !Id {
    if (module.cache.void_type) |existing| return existing;

    const id = module.allocId();
    try module.sections.globals.emit(module.gpa, .OpTypeVoid, .{ .id_result = id });
    module.cache.void_type = id;
    try module.debugName(id, "void");
    return id;
}
543
/// Get or create an OpTypeOpaque with the given name, cached by name.
/// The name is duplicated into the module arena so the cache key and the
/// emitted literal outlive the caller's buffer.
pub fn opaqueType(module: *Module, name: []const u8) !Id {
    if (module.cache.opaque_types.get(name)) |existing| return existing;

    const id = module.allocId();
    const owned_name = try module.arena.dupe(u8, name);
    try module.sections.globals.emit(module.gpa, .OpTypeOpaque, .{
        .id_result = id,
        .literal_string = owned_name,
    });
    try module.debugName(id, owned_name);
    try module.cache.opaque_types.put(module.gpa, owned_name, id);
    return id;
}
556
/// Compute the width an integer of `bits` bits is actually backed by on the
/// current target. Returns `.{ backing_bits, is_big_int }`; when `is_big_int`
/// is true the integer must be represented as an array of `big_int_bits`-wide
/// words (see `intType`).
pub fn backingIntBits(module: *Module, bits: u16) struct { u16, bool } {
    assert(bits != 0);
    const target = module.zcu.getTarget();

    // With the Intel arbitrary-precision-integers extension, widths up to 32
    // can be used directly.
    if (target.cpu.has(.spirv, .arbitrary_precision_integers) and bits <= 32) {
        return .{ bits, false };
    }

    // We require Int8 and Int16 capabilities and benefit Int64 when available.
    // 32-bit integers are always supported (see spec, 2.16.1, Data rules).
    const ints = [_]struct { bits: u16, enabled: bool }{
        .{ .bits = 8, .enabled = true },
        .{ .bits = 16, .enabled = true },
        .{ .bits = 32, .enabled = true },
        .{
            .bits = 64,
            .enabled = target.cpu.has(.spirv, .int64) or target.cpu.arch == .spirv64,
        },
    };

    // Pick the smallest enabled width that fits.
    for (ints) |int| {
        if (bits <= int.bits and int.enabled) return .{ int.bits, false };
    }

    // Big int
    return .{ std.mem.alignForward(u16, bits, big_int_bits), true };
}
584
/// Get or create an OpTypeInt for the given signedness and bit width.
/// The width is rounded up via `backingIntBits`; widths beyond native support
/// are lowered to an array of u32 words instead (big-int representation).
pub fn intType(module: *Module, signedness: std.builtin.Signedness, bits: u16) !Id {
    assert(bits > 0);

    const target = module.zcu.getTarget();
    const actual_signedness = switch (target.os.tag) {
        // Kernel only supports unsigned ints.
        .opencl, .amdhsa => .unsigned,
        else => signedness,
    };
    const backing_bits, const big_int = module.backingIntBits(bits);
    if (big_int) {
        // TODO: support composite integers larger than 64 bit
        assert(backing_bits <= 64);
        // Represent as [backing_bits / 32]u32.
        const u32_ty = try module.intType(.unsigned, 32);
        const len_id = try module.constant(u32_ty, .{ .uint32 = backing_bits / big_int_bits });
        return module.arrayType(len_id, u32_ty);
    }

    const entry = try module.cache.int_types.getOrPut(module.gpa, .{ .signedness = actual_signedness, .bits = backing_bits });
    if (!entry.found_existing) {
        const result_id = module.allocId();
        entry.value_ptr.* = result_id;
        try module.sections.globals.emit(module.gpa, .OpTypeInt, .{
            .id_result = result_id,
            .width = backing_bits,
            // OpTypeInt encodes signedness as 0 = unsigned/unknown, 1 = signed.
            .signedness = switch (actual_signedness) {
                .signed => 1,
                .unsigned => 0,
            },
        });

        switch (actual_signedness) {
            .signed => try module.debugNameFmt(result_id, "i{}", .{backing_bits}),
            .unsigned => try module.debugNameFmt(result_id, "u{}", .{backing_bits}),
        }
    }
    return entry.value_ptr.*;
}
623
/// Get or create an OpTypeFloat of the given bit width, named "f<bits>"
/// in debug info.
pub fn floatType(module: *Module, bits: u16) !Id {
    assert(bits > 0);
    const gop = try module.cache.float_types.getOrPut(module.gpa, .{ .bits = bits });
    if (!gop.found_existing) {
        const id = module.allocId();
        gop.value_ptr.* = id;
        try module.sections.globals.emit(module.gpa, .OpTypeFloat, .{
            .id_result = id,
            .width = bits,
        });
        try module.debugNameFmt(id, "f{}", .{bits});
    }
    return gop.value_ptr.*;
}
638
/// Get or create an OpTypeVector with `len` components of `child_ty_id`.
pub fn vectorType(module: *Module, len: u32, child_ty_id: Id) !Id {
    const gop = try module.cache.vector_types.getOrPut(module.gpa, .{ child_ty_id, len });
    if (!gop.found_existing) {
        const id = module.allocId();
        gop.value_ptr.* = id;
        try module.sections.globals.emit(module.gpa, .OpTypeVector, .{
            .id_result = id,
            .component_type = child_ty_id,
            .component_count = len,
        });
    }
    return gop.value_ptr.*;
}
652
/// Get or create an OpTypeArray; the length is a constant id (`len_id`).
pub fn arrayType(module: *Module, len_id: Id, child_ty_id: Id) !Id {
    const gop = try module.cache.array_types.getOrPut(module.gpa, .{ child_ty_id, len_id });
    if (!gop.found_existing) {
        const id = module.allocId();
        gop.value_ptr.* = id;
        try module.sections.globals.emit(module.gpa, .OpTypeArray, .{
            .id_result = id,
            .element_type = child_ty_id,
            .length = len_id,
        });
    }
    return gop.value_ptr.*;
}
666
/// Get or create an OpTypePointer to `child_ty_id` in `storage_class`.
/// Cached in `ptr_types` so repeated requests yield the same id (callers rely
/// on id-equality of pointer types; see the `ptr_types` field doc).
pub fn ptrType(module: *Module, child_ty_id: Id, storage_class: spec.StorageClass) !Id {
    const gop = try module.ptr_types.getOrPut(module.gpa, .{ child_ty_id, storage_class });
    if (!gop.found_existing) {
        gop.value_ptr.* = module.allocId();
        try module.sections.globals.emit(module.gpa, .OpTypePointer, .{
            .id_result = gop.value_ptr.*,
            .storage_class = storage_class,
            .type = child_ty_id,
        });
    }
    return gop.value_ptr.*;
}
681
/// Get or create an OpTypeStruct with the given field types, cached by
/// (fields, ip_index). Optionally emits member debug names, and — on shader
/// targets — member byte-offset decorations.
/// `ip_index` distinguishes distinct Zig types with identical field layouts;
/// when stripping, `.none` is used so layout-identical structs unify.
pub fn structType(
    module: *Module,
    types: []const Id,
    maybe_names: ?[]const []const u8,
    maybe_offsets: ?[]const u32,
    ip_index: InternPool.Index,
) !Id {
    const target = module.zcu.getTarget();
    const actual_ip_index = if (module.zcu.comp.config.root_strip) .none else ip_index;

    if (module.cache.struct_types.get(.{ .fields = types, .ip_index = actual_ip_index })) |id| return id;
    const result_id = module.allocId();
    // Duplicate into the arena: the cache key must outlive the caller's slice.
    const types_dup = try module.arena.dupe(Id, types);
    try module.sections.globals.emit(module.gpa, .OpTypeStruct, .{
        .id_result = result_id,
        .id_ref = types_dup,
    });

    if (maybe_names) |names| {
        assert(names.len == types.len);
        for (names, 0..) |name, i| {
            try module.memberDebugName(result_id, @intCast(i), name);
        }
    }

    // Only shader targets require explicit member offsets.
    switch (target.os.tag) {
        .vulkan, .opengl => {
            if (maybe_offsets) |offsets| {
                assert(offsets.len == types.len);
                for (offsets, 0..) |offset, i| {
                    try module.decorateMember(
                        result_id,
                        @intCast(i),
                        .{ .offset = .{ .byte_offset = offset } },
                    );
                }
            }
        },
        else => {},
    }

    try module.cache.struct_types.put(
        module.gpa,
        .{ .fields = types_dup, .ip_index = actual_ip_index },
        result_id,
    );
    return result_id;
}
730
/// Get or create an OpTypeFunction with the given return and parameter types.
pub fn functionType(module: *Module, return_ty_id: Id, param_type_ids: []const Id) !Id {
    const key: FnType = .{ .return_ty = return_ty_id, .params = param_type_ids };
    if (module.cache.fn_types.get(key)) |existing| return existing;

    const id = module.allocId();
    // Duplicate into the arena so the cache key outlives the caller's slice.
    const owned_params = try module.arena.dupe(Id, param_type_ids);
    try module.sections.globals.emit(module.gpa, .OpTypeFunction, .{
        .id_result = id,
        .return_type = return_ty_id,
        .id_ref_2 = owned_params,
    });
    try module.cache.fn_types.put(module.gpa, .{
        .return_ty = return_ty_id,
        .params = owned_params,
    }, id);
    return id;
}
749
/// Get or create an OpConstant of type `ty_id` with the given literal value.
pub fn constant(module: *Module, ty_id: Id, value: spec.LiteralContextDependentNumber) !Id {
    const entry = try module.cache.constants.getOrPut(module.gpa, .{ .ty = ty_id, .value = value });
    if (!entry.found_existing) {
        const id = module.allocId();
        entry.value_ptr.* = id;
        try module.sections.globals.emit(module.gpa, .OpConstant, .{
            .id_result_type = ty_id,
            .id_result = id,
            .value = value,
        });
    }
    return entry.value_ptr.*;
}
762
/// Get or create the boolean constant for `value`
/// (OpConstantTrue / OpConstantFalse), cached per value.
pub fn constBool(module: *Module, value: bool) !Id {
    if (module.cache.bool_const[@intFromBool(value)]) |existing| return existing;

    const result_ty_id = try module.boolType();
    const result_id = module.allocId();
    module.cache.bool_const[@intFromBool(value)] = result_id;

    if (value) {
        try module.sections.globals.emit(module.gpa, .OpConstantTrue, .{
            .id_result_type = result_ty_id,
            .id_result = result_id,
        });
    } else {
        try module.sections.globals.emit(module.gpa, .OpConstantFalse, .{
            .id_result_type = result_ty_id,
            .id_result = result_id,
        });
    }

    return result_id;
}
783
/// Get or create the global OpVariable decl for a SPIR-V built-in (e.g. a
/// workgroup id), cached per (built-in, storage class). The variable is
/// decorated with the BuiltIn decoration. Returns the decl index; the
/// result-id lives in the decl.
pub fn builtin(
    module: *Module,
    result_ty_id: Id,
    spirv_builtin: spec.BuiltIn,
    storage_class: spec.StorageClass,
) !Decl.Index {
    const gop = try module.cache.builtins.getOrPut(module.gpa, .{ spirv_builtin, storage_class });
    if (!gop.found_existing) {
        const decl_index = try module.allocDecl(.global);
        const decl = module.declPtr(decl_index);

        gop.value_ptr.* = decl_index;
        try module.sections.globals.emit(module.gpa, .OpVariable, .{
            .id_result_type = result_ty_id,
            .id_result = decl.result_id,
            .storage_class = storage_class,
        });
        try module.decorate(decl.result_id, .{ .built_in = .{ .built_in = spirv_builtin } });
    }
    return gop.value_ptr.*;
}
805
/// Emit an OpUndef of type `ty_id`. Not cached; every call yields a fresh id.
pub fn constUndef(module: *Module, ty_id: Id) !Id {
    const id = module.allocId();
    try module.sections.globals.emit(module.gpa, .OpUndef, .{
        .id_result_type = ty_id,
        .id_result = id,
    });
    return id;
}
814
/// Emit an OpConstantNull of type `ty_id`. Not cached; every call yields a fresh id.
pub fn constNull(module: *Module, ty_id: Id) !Id {
    const id = module.allocId();
    try module.sections.globals.emit(module.gpa, .OpConstantNull, .{
        .id_result_type = ty_id,
        .id_result = id,
    });
    return id;
}
823
/// Decorate a result-id. Deduplicated per (target, decoration) so repeated
/// requests emit only one OpDecorate.
pub fn decorate(
    module: *Module,
    target: Id,
    decoration: spec.Decoration.Extended,
) !void {
    const entry = try module.cache.decorations.getOrPut(module.gpa, .{ target, decoration });
    if (entry.found_existing) return;
    try module.sections.annotations.emit(module.gpa, .OpDecorate, .{
        .target = target,
        .decoration = decoration,
    });
}
838
/// Decorate a result-id which is a member of some struct.
/// We really don't have to and shouldn't need to cache this.
pub fn decorateMember(
    module: *Module,
    structure_type: Id,
    member: u32,
    decoration: spec.Decoration.Extended,
) !void {
    try module.sections.annotations.emit(module.gpa, .OpMemberDecorate, .{
        .structure_type = structure_type,
        .member = member,
        .decoration = decoration,
    });
}
853
/// Append a new decl of the given kind, assigning it a fresh result-id, and
/// return its index.
pub fn allocDecl(module: *Module, kind: Decl.Kind) !Decl.Index {
    try module.decls.append(module.gpa, .{
        .kind = kind,
        .result_id = module.allocId(),
    });

    // Result type is known, so @enumFromInt infers Decl.Index directly;
    // the previous @as/@intCast chain was redundant.
    return @enumFromInt(module.decls.items.len - 1);
}
862
/// Return a pointer to the decl at `index`.
/// Invalidated when `decls` grows (e.g. by `allocDecl`); do not hold across calls.
pub fn declPtr(module: *Module, index: Decl.Index) *Decl {
    return &module.decls.items[@intFromEnum(index)];
}
866
/// Declare a SPIR-V function as an entry point. This causes an extra wrapper
/// function to be generated, which is then exported as the real entry point. The purpose of this
/// wrapper is to allocate and initialize the structure holding the instance globals.
pub fn declareEntryPoint(
    module: *Module,
    decl_index: Decl.Index,
    name: []const u8,
    exec_model: spec.ExecutionModel,
    exec_mode: ?spec.ExecutionMode,
) !void {
    // Keyed by result-id: re-declaring the same decl updates its entry in place.
    const gop = try module.entry_points.getOrPut(module.gpa, module.declPtr(decl_index).result_id);
    gop.value_ptr.decl_index = decl_index;
    gop.value_ptr.name = name;
    gop.value_ptr.exec_model = exec_model;
    // Might've been set by assembler
    if (!gop.found_existing) gop.value_ptr.exec_mode = exec_mode;
}
884
/// Emit an OpName for `target`; no-op when debug info is stripped.
pub fn debugName(module: *Module, target: Id, name: []const u8) !void {
    if (module.zcu.comp.config.root_strip) return;

    try module.sections.debug_names.emit(module.gpa, .OpName, .{
        .target = target,
        .name = name,
    });
}
892
/// Like `debugName`, but formats the name; no-op when debug info is stripped.
pub fn debugNameFmt(module: *Module, target: Id, comptime fmt: []const u8, args: anytype) !void {
    if (module.zcu.comp.config.root_strip) return;

    // The emitted section copies the string, so the temporary can be freed here.
    const formatted = try std.fmt.allocPrint(module.gpa, fmt, args);
    defer module.gpa.free(formatted);
    try module.debugName(target, formatted);
}
899
/// Emit an OpMemberName for struct member `member` of `target`;
/// no-op when debug info is stripped.
pub fn memberDebugName(module: *Module, target: Id, member: u32, name: []const u8) !void {
    if (module.zcu.comp.config.root_strip) return;

    try module.sections.debug_names.emit(module.gpa, .OpMemberName, .{
        .type = target,
        .member = member,
        .name = name,
    });
}
908
/// Get or create an OpString for `string`, cached by contents.
/// `string` is not copied as a key; it must outlive the module.
pub fn debugString(module: *Module, string: []const u8) !Id {
    const gop = try module.cache.strings.getOrPut(module.gpa, string);
    if (!gop.found_existing) {
        const id = module.allocId();
        gop.value_ptr.* = id;
        try module.sections.debug_strings.emit(module.gpa, .OpString, .{
            .id_result = id,
            .string = string,
        });
    }
    return gop.value_ptr.*;
}
920
/// Map a Zig address space to the corresponding SPIR-V storage class for the
/// current target. Address spaces of non-SPIR-V targets (AVR/Propeller/etc.
/// spaces such as `.flash*`, `.cog`, `.hub`) are unreachable here.
pub fn storageClass(module: *Module, as: std.builtin.AddressSpace) spec.StorageClass {
    const target = module.zcu.getTarget();
    return switch (as) {
        .generic => .function,
        // `.global` differs per API: kernels address global memory as
        // CrossWorkgroup, shaders as StorageBuffer.
        .global => switch (target.os.tag) {
            .opencl, .amdhsa => .cross_workgroup,
            else => .storage_buffer,
        },
        .push_constant => .push_constant,
        .output => .output,
        .uniform => .uniform,
        .storage_buffer => .storage_buffer,
        .physical_storage_buffer => .physical_storage_buffer,
        .constant => .uniform_constant,
        .shared => .workgroup,
        .local => .function,
        .input => .input,
        .gs,
        .fs,
        .ss,
        .far,
        .param,
        .flash,
        .flash1,
        .flash2,
        .flash3,
        .flash4,
        .flash5,
        .cog,
        .lut,
        .hub,
        => unreachable,
    };
}
954}