master
1pub const Class = enum {
2 /// INTEGER: This class consists of integral types that fit into one of the general
3 /// purpose registers.
4 integer,
5 /// SSE: The class consists of types that fit into a vector register.
6 sse,
7 /// SSEUP: The class consists of types that fit into a vector register and can be passed
8 /// and returned in the upper bytes of it.
9 sseup,
10 /// X87, X87UP: These classes consist of types that will be returned via the
11 /// x87 FPU.
12 x87,
13 /// The 15-bit exponent, 1-bit sign, and 6 bytes of padding of an `f80`.
14 x87up,
15 /// NO_CLASS: This class is used as initializer in the algorithms. It will be used for
16 /// padding and empty structures and unions.
17 none,
18 /// MEMORY: This class consists of types that will be passed and returned in mem-
19 /// ory via the stack.
20 memory,
21 /// Win64 passes 128-bit integers as `Class.memory` but returns them as `Class.sse`.
22 win_i128,
23 /// A `Class.sse` containing one `f32`.
24 float,
25 /// A `Class.sse` containing two `f32`s.
26 float_combine,
27 /// Clang passes each vector element in a separate `Class.integer`, but returns as `Class.memory`.
28 integer_per_element,
29
30 pub const one_integer: [8]Class = .{
31 .integer, .none, .none, .none,
32 .none, .none, .none, .none,
33 };
34 pub const two_integers: [8]Class = .{
35 .integer, .integer, .none, .none,
36 .none, .none, .none, .none,
37 };
38 pub const three_integers: [8]Class = .{
39 .integer, .integer, .integer, .none,
40 .none, .none, .none, .none,
41 };
42 pub const four_integers: [8]Class = .{
43 .integer, .integer, .integer, .integer,
44 .none, .none, .none, .none,
45 };
46 pub const len_integers: [8]Class = .{
47 .integer_per_element, .none, .none, .none,
48 .none, .none, .none, .none,
49 };
50
51 pub const @"f16" = @"f64";
52 pub const @"f32": [8]Class = .{
53 .float, .none, .none, .none,
54 .none, .none, .none, .none,
55 };
56 pub const @"f64": [8]Class = .{
57 .sse, .none, .none, .none,
58 .none, .none, .none, .none,
59 };
60 pub const @"f80": [8]Class = .{
61 .x87, .x87up, .none, .none,
62 .none, .none, .none, .none,
63 };
64 pub const @"f128": [8]Class = .{
65 .sse, .sseup, .none, .none,
66 .none, .none, .none, .none,
67 };
68
69 /// COMPLEX_X87: This class consists of types that will be returned via the x87
70 /// FPU.
71 pub const complex_x87: [8]Class = .{
72 .x87, .x87up, .x87, .x87up,
73 .none, .none, .none, .none,
74 };
75
76 pub const stack: [8]Class = .{
77 .memory, .none, .none, .none,
78 .none, .none, .none, .none,
79 };
80
81 pub fn isX87(class: Class) bool {
82 return switch (class) {
83 .x87, .x87up => true,
84 else => false,
85 };
86 }
87
88 /// Combine a field class with the prev one.
89 fn combineSystemV(prev_class: Class, next_class: Class) Class {
90 // "If both classes are equal, this is the resulting class."
91 if (prev_class == next_class)
92 return if (prev_class == .float) .float_combine else prev_class;
93
94 // "If one of the classes is NO_CLASS, the resulting class
95 // is the other class."
96 if (prev_class == .none) return next_class;
97
98 // "If one of the classes is MEMORY, the result is the MEMORY class."
99 if (prev_class == .memory or next_class == .memory) return .memory;
100
101 // "If one of the classes is INTEGER, the result is the INTEGER."
102 if (prev_class == .integer or next_class == .integer) return .integer;
103
104 // "If one of the classes is X87, X87UP, COMPLEX_X87 class,
105 // MEMORY is used as class."
106 if (prev_class.isX87() or next_class.isX87()) return .memory;
107
108 // "Otherwise class SSE is used."
109 return .sse;
110 }
111};
112
113pub const Context = enum { ret, arg, other };
114
115pub fn classifyWindows(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Context) Class {
116 // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
117 // "There's a strict one-to-one correspondence between a function call's arguments
118 // and the registers used for those arguments. Any argument that doesn't fit in 8
119 // bytes, or isn't 1, 2, 4, or 8 bytes, must be passed by reference. A single argument
120 // is never spread across multiple registers."
121 // "All floating point operations are done using the 16 XMM registers."
122 // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
123 // as if they were integers of the same size."
124 return switch (ty.zigTypeTag(zcu)) {
125 .pointer,
126 .int,
127 .bool,
128 .@"enum",
129 .void,
130 .noreturn,
131 .error_set,
132 .@"struct",
133 .@"union",
134 .optional,
135 .array,
136 .error_union,
137 .@"anyframe",
138 .frame,
139 => switch (ty.abiSize(zcu)) {
140 0 => unreachable,
141 1, 2, 4, 8 => .integer,
142 else => switch (ty.zigTypeTag(zcu)) {
143 .int => .win_i128,
144 .@"struct", .@"union" => if (ty.containerLayout(zcu) == .@"packed")
145 .win_i128
146 else
147 .memory,
148 else => .memory,
149 },
150 },
151
152 .float => switch (ty.floatBits(target)) {
153 16, 32, 64 => .sse,
154 80 => .memory,
155 128 => if (ctx == .arg) .memory else .sse,
156 else => unreachable,
157 },
158 .vector => .sse,
159
160 .type,
161 .comptime_float,
162 .comptime_int,
163 .undefined,
164 .null,
165 .@"fn",
166 .@"opaque",
167 .enum_literal,
168 => unreachable,
169 };
170}
171
172/// There are a maximum of 8 possible return slots. Returned values are in
173/// the beginning of the array; unused slots are filled with .none.
174pub fn classifySystemV(ty: Type, zcu: *Zcu, target: *const std.Target, ctx: Context) [8]Class {
175 switch (ty.zigTypeTag(zcu)) {
176 .pointer => switch (ty.ptrSize(zcu)) {
177 .slice => return Class.two_integers,
178 else => return Class.one_integer,
179 },
180 .int, .@"enum", .error_set => {
181 const bits = ty.intInfo(zcu).bits;
182 if (bits <= 64 * 1) return Class.one_integer;
183 if (bits <= 64 * 2) return Class.two_integers;
184 if (bits <= 64 * 3) return Class.three_integers;
185 if (bits <= 64 * 4) return Class.four_integers;
186 return Class.stack;
187 },
188 .bool, .void, .noreturn => return Class.one_integer,
189 .float => switch (ty.floatBits(target)) {
190 16 => {
191 if (ctx == .other) return Class.stack;
192 // TODO clang doesn't allow __fp16 as .ret or .arg
193 return Class.f16;
194 },
195 32 => return Class.f32,
196 64 => return Class.f64,
197 // "Arguments of types __float128, _Decimal128 and __m128 are
198 // split into two halves. The least significant ones belong
199 // to class SSE, the most significant one to class SSEUP."
200 128 => return Class.f128,
201 // "The 64-bit mantissa of arguments of type long double
202 // belongs to class X87, the 16-bit exponent plus 6 bytes
203 // of padding belongs to class X87UP."
204 80 => return Class.f80,
205 else => unreachable,
206 },
207 .vector => {
208 const elem_ty = ty.childType(zcu);
209 const bits = elem_ty.bitSize(zcu) * ty.arrayLen(zcu);
210 if (elem_ty.toIntern() == .bool_type) {
211 if (bits <= 32) return Class.one_integer;
212 if (bits <= 64) return Class.f64;
213 if (ctx == .other) return Class.stack;
214 if (bits <= 128) return Class.len_integers;
215 if (bits <= 256 and target.cpu.has(.x86, .avx)) return Class.len_integers;
216 if (bits <= 512 and target.cpu.has(.x86, .avx512f)) return Class.len_integers;
217 return Class.stack;
218 }
219 if (elem_ty.isRuntimeFloat() and elem_ty.floatBits(target) == 80) {
220 if (bits <= 80 * 1) return Class.f80;
221 if (bits <= 80 * 2) return Class.complex_x87;
222 return Class.stack;
223 }
224 if (bits <= 64 * 1) return .{
225 .sse, .none, .none, .none,
226 .none, .none, .none, .none,
227 };
228 if (bits <= 64 * 2) return .{
229 .sse, .sseup, .none, .none,
230 .none, .none, .none, .none,
231 };
232 if (ctx == .arg and !target.cpu.has(.x86, .avx)) return Class.stack;
233 if (bits <= 64 * 3) return .{
234 .sse, .sseup, .sseup, .none,
235 .none, .none, .none, .none,
236 };
237 if (bits <= 64 * 4) return .{
238 .sse, .sseup, .sseup, .sseup,
239 .none, .none, .none, .none,
240 };
241 if (ctx == .arg and !target.cpu.has(.x86, .avx512f)) return Class.stack;
242 if (bits <= 64 * 5) return .{
243 .sse, .sseup, .sseup, .sseup,
244 .sseup, .none, .none, .none,
245 };
246 if (bits <= 64 * 6) return .{
247 .sse, .sseup, .sseup, .sseup,
248 .sseup, .sseup, .none, .none,
249 };
250 if (bits <= 64 * 7) return .{
251 .sse, .sseup, .sseup, .sseup,
252 .sseup, .sseup, .sseup, .none,
253 };
254 if (bits <= 64 * 8 or (ctx == .ret and bits <= @as(u64, if (target.cpu.has(.x86, .avx512f))
255 64 * 32
256 else if (target.cpu.has(.x86, .avx))
257 64 * 16
258 else
259 64 * 8))) return .{
260 .sse, .sseup, .sseup, .sseup,
261 .sseup, .sseup, .sseup, .sseup,
262 };
263 return Class.stack;
264 },
265 .optional => {
266 if (ty.optionalReprIsPayload(zcu)) {
267 return classifySystemV(ty.optionalChild(zcu), zcu, target, ctx);
268 }
269 return Class.stack;
270 },
271 .@"struct", .@"union" => {
272 // "If the size of an object is larger than eight eightbytes, or
273 // it contains unaligned fields, it has class MEMORY"
274 // "If the size of the aggregate exceeds a single eightbyte, each is classified
275 // separately.".
276 const ty_size = ty.abiSize(zcu);
277 switch (ty.containerLayout(zcu)) {
278 .auto => unreachable,
279 .@"extern" => {},
280 .@"packed" => {
281 if (ty_size <= 8) return Class.one_integer;
282 if (ty_size <= 16) return Class.two_integers;
283 unreachable; // frontend should not have allowed this type as extern
284 },
285 }
286 if (ty_size > 64) return Class.stack;
287
288 var result: [8]Class = @splat(.none);
289 _ = if (zcu.typeToStruct(ty)) |loaded_struct|
290 classifySystemVStruct(&result, 0, loaded_struct, zcu, target)
291 else if (zcu.typeToUnion(ty)) |loaded_union|
292 classifySystemVUnion(&result, 0, loaded_union, zcu, target)
293 else
294 unreachable;
295
296 // Post-merger cleanup
297
298 // "If one of the classes is MEMORY, the whole argument is passed in memory"
299 // "If X87UP is not preceded by X87, the whole argument is passed in memory."
300 for (result, 0..) |class, i| switch (class) {
301 .memory => return Class.stack,
302 .x87up => if (i == 0 or result[i - 1] != .x87) return Class.stack,
303 else => continue,
304 };
305 // "If the size of the aggregate exceeds two eightbytes and the first eight-
306 // byte isn’t SSE or any other eightbyte isn’t SSEUP, the whole argument
307 // is passed in memory."
308 if (ty_size > 16 and (result[0] != .sse or
309 std.mem.indexOfNone(Class, result[1..], &.{ .sseup, .none }) != null)) return Class.stack;
310
311 // "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
312 for (&result, 0..) |*item, i| {
313 if (item.* == .sseup) switch (result[i - 1]) {
314 .sse, .sseup => continue,
315 else => item.* = .sse,
316 };
317 }
318 return result;
319 },
320 .array => {
321 const ty_size = ty.abiSize(zcu);
322 if (ty_size <= 8) return Class.one_integer;
323 if (ty_size <= 16) return Class.two_integers;
324 return Class.stack;
325 },
326 else => unreachable,
327 }
328}
329
330fn classifySystemVStruct(
331 result: *[8]Class,
332 starting_byte_offset: u64,
333 loaded_struct: InternPool.LoadedStructType,
334 zcu: *Zcu,
335 target: *const std.Target,
336) u64 {
337 const ip = &zcu.intern_pool;
338 var byte_offset = starting_byte_offset;
339 var field_it = loaded_struct.iterateRuntimeOrder(ip);
340 while (field_it.next()) |field_index| {
341 const field_ty = Type.fromInterned(loaded_struct.field_types.get(ip)[field_index]);
342 const field_align = loaded_struct.fieldAlign(ip, field_index);
343 byte_offset = std.mem.alignForward(
344 u64,
345 byte_offset,
346 field_align.toByteUnits() orelse field_ty.abiAlignment(zcu).toByteUnits().?,
347 );
348 if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
349 switch (field_loaded_struct.layout) {
350 .auto => unreachable,
351 .@"extern" => {
352 byte_offset = classifySystemVStruct(result, byte_offset, field_loaded_struct, zcu, target);
353 continue;
354 },
355 .@"packed" => {},
356 }
357 } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
358 switch (field_loaded_union.flagsUnordered(ip).layout) {
359 .auto => unreachable,
360 .@"extern" => {
361 byte_offset = classifySystemVUnion(result, byte_offset, field_loaded_union, zcu, target);
362 continue;
363 },
364 .@"packed" => {},
365 }
366 }
367 const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .other), .none);
368 for (result[@intCast(byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
369 result_class.* = result_class.combineSystemV(field_class);
370 byte_offset += field_ty.abiSize(zcu);
371 }
372 const final_byte_offset = starting_byte_offset + loaded_struct.sizeUnordered(ip);
373 std.debug.assert(final_byte_offset == std.mem.alignForward(
374 u64,
375 byte_offset,
376 loaded_struct.flagsUnordered(ip).alignment.toByteUnits().?,
377 ));
378 return final_byte_offset;
379}
380
381fn classifySystemVUnion(
382 result: *[8]Class,
383 starting_byte_offset: u64,
384 loaded_union: InternPool.LoadedUnionType,
385 zcu: *Zcu,
386 target: *const std.Target,
387) u64 {
388 const ip = &zcu.intern_pool;
389 for (0..loaded_union.field_types.len) |field_index| {
390 const field_ty = Type.fromInterned(loaded_union.field_types.get(ip)[field_index]);
391 if (zcu.typeToStruct(field_ty)) |field_loaded_struct| {
392 switch (field_loaded_struct.layout) {
393 .auto => unreachable,
394 .@"extern" => {
395 _ = classifySystemVStruct(result, starting_byte_offset, field_loaded_struct, zcu, target);
396 continue;
397 },
398 .@"packed" => {},
399 }
400 } else if (zcu.typeToUnion(field_ty)) |field_loaded_union| {
401 switch (field_loaded_union.flagsUnordered(ip).layout) {
402 .auto => unreachable,
403 .@"extern" => {
404 _ = classifySystemVUnion(result, starting_byte_offset, field_loaded_union, zcu, target);
405 continue;
406 },
407 .@"packed" => {},
408 }
409 }
410 const field_classes = std.mem.sliceTo(&classifySystemV(field_ty, zcu, target, .other), .none);
411 for (result[@intCast(starting_byte_offset / 8)..][0..field_classes.len], field_classes) |*result_class, field_class|
412 result_class.* = result_class.combineSystemV(field_class);
413 }
414 return starting_byte_offset + loaded_union.sizeUnordered(ip);
415}
416
417pub const zigcc = struct {
418 pub const stack_align: ?InternPool.Alignment = null;
419 pub const return_in_regs = true;
420 pub const params_in_regs = true;
421
422 const volatile_gpr = gp_regs.len - 5;
423 const volatile_x87 = x87_regs.len - 1;
424 const volatile_sse = sse_avx_regs.len;
425
426 /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
427 /// for anything else but stack offset tracking therefore we exclude them from this set.
428 pub const callee_preserved_regs = gp_regs[volatile_gpr..] ++ x87_regs[volatile_x87 .. x87_regs.len - 1] ++ sse_avx_regs[volatile_sse..];
429 /// These registers need to be preserved (saved on the stack) and restored by the caller before
430 /// the caller relinquishes control to a subroutine via call instruction (or similar).
431 /// In other words, these registers are free to use by the callee.
432 pub const caller_preserved_regs = gp_regs[0..volatile_gpr] ++ x87_regs[0..volatile_x87] ++ sse_avx_regs[0..volatile_sse];
433
434 const int_param_regs = gp_regs[0 .. volatile_gpr - 1];
435 const x87_param_regs = x87_regs[0..volatile_x87];
436 const sse_param_regs = sse_avx_regs[0 .. volatile_sse / 2];
437 const int_return_regs = gp_regs[0..volatile_gpr];
438 const x87_return_regs = x87_regs[0..volatile_x87];
439 const sse_return_regs = sse_avx_regs[0..volatile_gpr];
440};
441
442pub const SysV = struct {
443 /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
444 /// for anything else but stack offset tracking therefore we exclude them from this set.
445 pub const callee_preserved_regs = [_]Register{ .rbx, .r12, .r13, .r14, .r15 };
446 /// These registers need to be preserved (saved on the stack) and restored by the caller before
447 /// the caller relinquishes control to a subroutine via call instruction (or similar).
448 /// In other words, these registers are free to use by the callee.
449 pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .rsi, .rdi, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs;
450
451 pub const c_abi_int_param_regs = [_]Register{ .rdi, .rsi, .rdx, .rcx, .r8, .r9 };
452 pub const c_abi_x87_param_regs = x87_regs[0..0];
453 pub const c_abi_sse_param_regs = sse_avx_regs[0..8];
454 pub const c_abi_int_return_regs = [_]Register{ .rax, .rdx };
455 pub const c_abi_x87_return_regs = x87_regs[0..2];
456 pub const c_abi_sse_return_regs = sse_avx_regs[0..4];
457};
458
459pub const Win64 = struct {
460 /// Note that .rsp and .rbp also belong to this set, however, we never expect to use them
461 /// for anything else but stack offset tracking therefore we exclude them from this set.
462 pub const callee_preserved_regs = [_]Register{ .rbx, .rsi, .rdi, .r12, .r13, .r14, .r15 };
463 /// These registers need to be preserved (saved on the stack) and restored by the caller before
464 /// the caller relinquishes control to a subroutine via call instruction (or similar).
465 /// In other words, these registers are free to use by the callee.
466 pub const caller_preserved_regs = [_]Register{ .rax, .rcx, .rdx, .r8, .r9, .r10, .r11 } ++ x87_regs ++ sse_avx_regs;
467
468 pub const c_abi_int_param_regs = [_]Register{ .rcx, .rdx, .r8, .r9 };
469 pub const c_abi_x87_param_regs = x87_regs[0..0];
470 pub const c_abi_sse_param_regs = sse_avx_regs[0..4];
471 pub const c_abi_int_return_regs = [_]Register{.rax};
472 pub const c_abi_x87_return_regs = x87_regs[0..0];
473 pub const c_abi_sse_return_regs = sse_avx_regs[0..1];
474};
475
476pub fn getCalleePreservedRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
477 return switch (cc) {
478 .auto => zigcc.callee_preserved_regs,
479 .x86_64_sysv => &SysV.callee_preserved_regs,
480 .x86_64_win => &Win64.callee_preserved_regs,
481 else => unreachable,
482 };
483}
484
485pub fn getCallerPreservedRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
486 return switch (cc) {
487 .auto => zigcc.caller_preserved_regs,
488 .x86_64_sysv => &SysV.caller_preserved_regs,
489 .x86_64_win => &Win64.caller_preserved_regs,
490 else => unreachable,
491 };
492}
493
494pub fn getCAbiIntParamRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
495 return switch (cc) {
496 .auto => zigcc.int_param_regs,
497 .x86_64_sysv => &SysV.c_abi_int_param_regs,
498 .x86_64_win => &Win64.c_abi_int_param_regs,
499 else => unreachable,
500 };
501}
502
503pub fn getCAbiX87ParamRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
504 return switch (cc) {
505 .auto => zigcc.x87_param_regs,
506 .x86_64_sysv => SysV.c_abi_x87_param_regs,
507 .x86_64_win => Win64.c_abi_x87_param_regs,
508 else => unreachable,
509 };
510}
511
512pub fn getCAbiSseParamRegs(cc: std.builtin.CallingConvention.Tag, target: *const std.Target) []const Register {
513 return switch (cc) {
514 .auto => switch (target.cpu.arch) {
515 else => unreachable,
516 .x86 => zigcc.sse_param_regs[0 .. zigcc.sse_param_regs.len / 2],
517 .x86_64 => zigcc.sse_param_regs,
518 },
519 .x86_64_sysv => SysV.c_abi_sse_param_regs,
520 .x86_64_win => Win64.c_abi_sse_param_regs,
521 else => unreachable,
522 };
523}
524
525pub fn getCAbiIntReturnRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
526 return switch (cc) {
527 .auto => zigcc.int_return_regs,
528 .x86_64_sysv => &SysV.c_abi_int_return_regs,
529 .x86_64_win => &Win64.c_abi_int_return_regs,
530 else => unreachable,
531 };
532}
533
534pub fn getCAbiX87ReturnRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
535 return switch (cc) {
536 .auto => zigcc.x87_return_regs,
537 .x86_64_sysv => SysV.c_abi_x87_return_regs,
538 .x86_64_win => Win64.c_abi_x87_return_regs,
539 else => unreachable,
540 };
541}
542
543pub fn getCAbiSseReturnRegs(cc: std.builtin.CallingConvention.Tag) []const Register {
544 return switch (cc) {
545 .auto => zigcc.sse_return_regs,
546 .x86_64_sysv => SysV.c_abi_sse_return_regs,
547 .x86_64_win => Win64.c_abi_sse_return_regs,
548 else => unreachable,
549 };
550}
551
552pub fn getCAbiLinkerScratchReg(cc: std.builtin.CallingConvention.Tag) Register {
553 return switch (cc) {
554 .auto => zigcc.int_return_regs[zigcc.int_return_regs.len - 1],
555 .x86_64_sysv => SysV.c_abi_int_return_regs[0],
556 .x86_64_win => Win64.c_abi_int_return_regs[0],
557 else => unreachable,
558 };
559}
560
561const gp_regs = [_]Register{
562 .rax, .rdx, .rbx, .rcx, .rsi, .rdi, .r8, .r9, .r10, .r11, .r12, .r13, .r14, .r15,
563};
564const x87_regs = [_]Register{
565 .st0, .st1, .st2, .st3, .st4, .st5, .st6, .st7,
566};
567const sse_avx_regs = [_]Register{
568 .ymm0, .ymm1, .ymm2, .ymm3, .ymm4, .ymm5, .ymm6, .ymm7,
569 .ymm8, .ymm9, .ymm10, .ymm11, .ymm12, .ymm13, .ymm14, .ymm15,
570};
571const allocatable_regs = gp_regs ++ x87_regs[0 .. x87_regs.len - 1] ++ sse_avx_regs;
572pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, allocatable_regs);
573
574// Register classes
575const RegisterBitSet = RegisterManager.RegisterBitSet;
576pub const RegisterClass = struct {
577 pub const gp: RegisterBitSet = blk: {
578 var set = RegisterBitSet.initEmpty();
579 for (allocatable_regs, 0..) |reg, index| if (reg.isClass(.general_purpose)) set.set(index);
580 break :blk set;
581 };
582 pub const gphi: RegisterBitSet = blk: {
583 var set = RegisterBitSet.initEmpty();
584 for (allocatable_regs, 0..) |reg, index| if (reg.isClass(.gphi)) set.set(index);
585 break :blk set;
586 };
587 pub const x87: RegisterBitSet = blk: {
588 var set = RegisterBitSet.initEmpty();
589 for (allocatable_regs, 0..) |reg, index| if (reg.isClass(.x87)) set.set(index);
590 break :blk set;
591 };
592 pub const sse: RegisterBitSet = blk: {
593 var set = RegisterBitSet.initEmpty();
594 for (allocatable_regs, 0..) |reg, index| if (reg.isClass(.sse)) set.set(index);
595 break :blk set;
596 };
597};
598
599const builtin = @import("builtin");
600const std = @import("std");
601const assert = std.debug.assert;
602const testing = std.testing;
603
604const InternPool = @import("../../InternPool.zig");
605const Register = @import("bits.zig").Register;
606const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
607const Type = @import("../../Type.zig");
608const Value = @import("../../Value.zig");
609const Zcu = @import("../../Zcu.zig");