//! This file contains the functionality for lowering RISC-V MIR to Instructions
2
/// Per-thread compiler handle; used to reach the `Zcu` (e.g. for type queries).
pt: Zcu.PerThread,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
/// Whether position-independent code is being generated.
pic: bool,
allocator: Allocator,
/// The MIR being lowered; instructions are fetched from here by index.
mir: Mir,
cc: std.builtin.CallingConvention,
/// Set by `fail`; holds the message when `error.LowerFail` is returned.
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
/// Number of valid entries in `result_insts`; only meaningful during `lowerMir`.
result_insts_len: u8 = undefined,
/// Number of valid entries in `result_relocs`; only meaningful during `lowerMir`.
result_relocs_len: u8 = undefined,
/// Scratch buffer for lowered instructions, reused (and overwritten) by each
/// `lowerMir` call. Sized for the worst case: either a single non-pseudo
/// instruction or one instruction per preserved register (spill/restore).
result_insts: [
    @max(
        1, // non-pseudo instruction
        abi.Registers.all_preserved.len, // spill / restore regs,
    )
]Instruction = undefined,
/// Scratch buffer for relocations produced alongside `result_insts`.
result_relocs: [1]Reloc = undefined,
21
/// Errors that lowering can produce. `LowerFail` is always accompanied by a
/// populated `err_msg` (see `fail`).
pub const Error = error{
    OutOfMemory,
    LowerFail,
    InvalidInstruction,
};
27
/// A relocation emitted against one of the lowered instructions of the current
/// `lowerMir` call.
pub const Reloc = struct {
    /// Index into the `insts` slice returned by `lowerMir`.
    lowered_inst_index: u8,
    target: Target,

    const Target = union(enum) {
        /// Branch target is another MIR instruction.
        inst: Mir.Inst.Index,

        /// Relocs the lowered_inst_index and the next instruction.
        load_symbol_reloc: bits.Symbol,
        /// Relocs the lowered_inst_index and the next two instructions.
        load_tlv_reloc: bits.Symbol,
        /// Relocs the lowered_inst_index and the next instruction.
        call_extern_fn_reloc: bits.Symbol,
    };
};
43
/// Lowers a single MIR instruction into one or more machine `Instruction`s
/// plus any relocations they need.
///
/// The returned slices point into `lower.result_insts` / `lower.result_relocs`
/// and are overwritten by the next call to lowerMir.
///
/// `options.allow_frame_locs` controls whether memory operands are resolved
/// through the MIR frame layout; when false, a placeholder `.{ .base = .s0,
/// .disp = 0 }` is used (e.g. before frame layout is finalized).
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index, options: struct {
    allow_frame_locs: bool,
}) Error!struct {
    insts: []const Instruction,
    relocs: []const Reloc,
} {
    const pt = lower.pt;
    const zcu = pt.zcu;

    // Reset the scratch buffers; poison them again on any exit so stale
    // results from a previous call can never be observed.
    lower.result_insts = undefined;
    lower.result_relocs = undefined;
    errdefer lower.result_insts = undefined;
    errdefer lower.result_relocs = undefined;
    lower.result_insts_len = 0;
    lower.result_relocs_len = 0;
    defer lower.result_insts_len = undefined;
    defer lower.result_relocs_len = undefined;

    const inst = lower.mir.instructions.get(index);
    log.debug("lowerMir {f}", .{inst});
    switch (inst.tag) {
        else => try lower.generic(inst),
        // Debug-info and dead pseudo instructions lower to nothing.
        .pseudo_dbg_line_column,
        .pseudo_dbg_epilogue_begin,
        .pseudo_dbg_prologue_end,
        .pseudo_dead,
        => {},

        .pseudo_load_rm, .pseudo_store_rm => {
            const rm = inst.data.rm;

            const frame_loc: Mir.FrameLoc = if (options.allow_frame_locs)
                rm.m.toFrameLoc(lower.mir)
            else
                .{ .base = .s0, .disp = 0 };

            switch (inst.tag) {
                .pseudo_load_rm => {
                    const dest_reg = rm.r;
                    const dest_reg_class = dest_reg.class();

                    const src_size = rm.m.mod.size;
                    const unsigned = rm.m.mod.unsigned;

                    const mnem: Mnemonic = switch (dest_reg_class) {
                        .int => switch (src_size) {
                            .byte => if (unsigned) .lbu else .lb,
                            .hword => if (unsigned) .lhu else .lh,
                            .word => if (unsigned) .lwu else .lw,
                            .dword => .ld,
                        },
                        .float => switch (src_size) {
                            .byte => unreachable, // Zig does not support 8-bit floats
                            .hword => return lower.fail("TODO: lowerMir pseudo_load_rm support 16-bit floats", .{}),
                            .word => .flw,
                            .dword => .fld,
                        },
                        .vector => switch (src_size) {
                            .byte => .vle8v,
                            // Fixed: half-word vector loads use the 16-bit
                            // element mnemonic, mirroring `.vse16v` in the
                            // store path below (was incorrectly `.vle32v`).
                            .hword => .vle16v,
                            .word => .vle32v,
                            .dword => .vle64v,
                        },
                    };

                    switch (dest_reg_class) {
                        .int, .float => {
                            try lower.emit(mnem, &.{
                                .{ .reg = rm.r },
                                .{ .reg = frame_loc.base },
                                .{ .imm = Immediate.s(frame_loc.disp) },
                            });
                        },
                        .vector => {
                            // Vector loads have no displacement operand.
                            assert(frame_loc.disp == 0);
                            try lower.emit(mnem, &.{
                                .{ .reg = rm.r },
                                .{ .reg = frame_loc.base },
                                .{ .reg = .zero },
                            });
                        },
                    }
                },
                .pseudo_store_rm => {
                    const src_reg = rm.r;
                    const src_reg_class = src_reg.class();

                    const dest_size = rm.m.mod.size;

                    const mnem: Mnemonic = switch (src_reg_class) {
                        .int => switch (dest_size) {
                            .byte => .sb,
                            .hword => .sh,
                            .word => .sw,
                            .dword => .sd,
                        },
                        .float => switch (dest_size) {
                            .byte => unreachable, // Zig does not support 8-bit floats
                            .hword => return lower.fail("TODO: lowerMir pseudo_store_rm support 16-bit floats", .{}),
                            .word => .fsw,
                            .dword => .fsd,
                        },
                        .vector => switch (dest_size) {
                            .byte => .vse8v,
                            .hword => .vse16v,
                            .word => .vse32v,
                            .dword => .vse64v,
                        },
                    };

                    switch (src_reg_class) {
                        .int, .float => {
                            try lower.emit(mnem, &.{
                                .{ .reg = frame_loc.base },
                                .{ .reg = rm.r },
                                .{ .imm = Immediate.s(frame_loc.disp) },
                            });
                        },
                        .vector => {
                            // Vector stores have no displacement operand.
                            assert(frame_loc.disp == 0);
                            try lower.emit(mnem, &.{
                                .{ .reg = rm.r },
                                .{ .reg = frame_loc.base },
                                .{ .reg = .zero },
                            });
                        },
                    }
                },
                else => unreachable,
            }
        },

        .pseudo_mv => {
            const rr = inst.data.rr;

            const dst_class = rr.rd.class();
            const src_class = rr.rs.class();

            switch (src_class) {
                .float => switch (dst_class) {
                    .float => {
                        // fsgnj rd, rs, rs is the canonical float register move.
                        try lower.emit(if (lower.hasFeature(.d)) .fsgnjnd else .fsgnjns, &.{
                            .{ .reg = rr.rd },
                            .{ .reg = rr.rs },
                            .{ .reg = rr.rs },
                        });
                    },
                    .int, .vector => return lower.fail("TODO: lowerMir pseudo_mv float -> {s}", .{@tagName(dst_class)}),
                },
                .int => switch (dst_class) {
                    .int => {
                        // addi rd, rs, 0 is the canonical integer register move.
                        try lower.emit(.addi, &.{
                            .{ .reg = rr.rd },
                            .{ .reg = rr.rs },
                            .{ .imm = Immediate.s(0) },
                        });
                    },
                    .vector => {
                        try lower.emit(.vmvvx, &.{
                            .{ .reg = rr.rd },
                            .{ .reg = rr.rs },
                            .{ .reg = .x0 },
                        });
                    },
                    .float => return lower.fail("TODO: lowerMir pseudo_mv int -> {s}", .{@tagName(dst_class)}),
                },
                .vector => switch (dst_class) {
                    .int => {
                        try lower.emit(.vadcvv, &.{
                            .{ .reg = rr.rd },
                            .{ .reg = .zero },
                            .{ .reg = rr.rs },
                        });
                    },
                    .float, .vector => return lower.fail("TODO: lowerMir pseudo_mv vector -> {s}", .{@tagName(dst_class)}),
                },
            }
        },

        .pseudo_j => {
            const j_type = inst.data.j_type;
            try lower.emit(.jal, &.{
                .{ .reg = j_type.rd },
                .{ .imm = lower.reloc(.{ .inst = j_type.inst }) },
            });
        },

        .pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list),
        .pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list),

        .pseudo_load_symbol => {
            const payload = inst.data.reloc;
            const dst_reg = payload.register;
            assert(dst_reg.class() == .int);

            // lui + addi pair; the linker patches both via the single reloc.
            try lower.emit(.lui, &.{
                .{ .reg = dst_reg },
                .{ .imm = lower.reloc(.{
                    .load_symbol_reloc = .{
                        .atom_index = payload.atom_index,
                        .sym_index = payload.sym_index,
                    },
                }) },
            });

            // the reloc above implies this one
            try lower.emit(.addi, &.{
                .{ .reg = dst_reg },
                .{ .reg = dst_reg },
                .{ .imm = Immediate.s(0) },
            });
        },

        .pseudo_load_tlv => {
            const payload = inst.data.reloc;
            const dst_reg = payload.register;
            assert(dst_reg.class() == .int);

            // lui + add tp + addi; the single reloc patches all three (see
            // `Reloc.Target.load_tlv_reloc`).
            try lower.emit(.lui, &.{
                .{ .reg = dst_reg },
                .{ .imm = lower.reloc(.{
                    .load_tlv_reloc = .{
                        .atom_index = payload.atom_index,
                        .sym_index = payload.sym_index,
                    },
                }) },
            });

            try lower.emit(.add, &.{
                .{ .reg = dst_reg },
                .{ .reg = dst_reg },
                .{ .reg = .tp },
            });

            try lower.emit(.addi, &.{
                .{ .reg = dst_reg },
                .{ .reg = dst_reg },
                .{ .imm = Immediate.s(0) },
            });
        },

        .pseudo_lea_rm => {
            const rm = inst.data.rm;
            assert(rm.r.class() == .int);

            const frame: Mir.FrameLoc = if (options.allow_frame_locs)
                rm.m.toFrameLoc(lower.mir)
            else
                .{ .base = .s0, .disp = 0 };

            try lower.emit(.addi, &.{
                .{ .reg = rm.r },
                .{ .reg = frame.base },
                .{ .imm = Immediate.s(frame.disp) },
            });
        },

        .pseudo_compare => {
            const compare = inst.data.compare;
            const op = compare.op;

            const rd = compare.rd;
            const rs1 = compare.rs1;
            const rs2 = compare.rs2;

            const class = rs1.class();
            const ty = compare.ty;
            const size = std.math.ceilPowerOfTwo(u64, ty.bitSize(zcu)) catch {
                return lower.fail("pseudo_compare size {}", .{ty.bitSize(zcu)});
            };

            const is_unsigned = ty.isUnsignedInt(zcu);
            const less_than: Mnemonic = if (is_unsigned) .sltu else .slt;

            switch (class) {
                .int => switch (op) {
                    // rd = (rs1 ^ rs2) == 0
                    .eq => {
                        try lower.emit(.xor, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });

                        try lower.emit(.sltiu, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                    // rd = (rs1 ^ rs2) != 0
                    .neq => {
                        try lower.emit(.xor, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });

                        try lower.emit(.sltu, &.{
                            .{ .reg = rd },
                            .{ .reg = .zero },
                            .{ .reg = rd },
                        });
                    },
                    // rs1 > rs2  <=>  rs2 < rs1
                    .gt => {
                        try lower.emit(less_than, &.{
                            .{ .reg = rd },
                            .{ .reg = rs2 },
                            .{ .reg = rs1 },
                        });
                    },
                    // rs1 >= rs2  <=>  !(rs1 < rs2)
                    .gte => {
                        try lower.emit(less_than, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                        try lower.emit(.xori, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                    .lt => {
                        try lower.emit(less_than, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                    },
                    // rs1 <= rs2  <=>  !(rs2 < rs1)
                    .lte => {
                        try lower.emit(less_than, &.{
                            .{ .reg = rd },
                            .{ .reg = rs2 },
                            .{ .reg = rs1 },
                        });

                        try lower.emit(.xori, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                },
                .float => switch (op) {
                    // eq
                    .eq => {
                        try lower.emit(if (size == 64) .feqd else .feqs, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                    },
                    // !(eq)
                    .neq => {
                        try lower.emit(if (size == 64) .feqd else .feqs, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                        try lower.emit(.xori, &.{
                            .{ .reg = rd },
                            .{ .reg = rd },
                            .{ .imm = Immediate.s(1) },
                        });
                    },
                    .lt => {
                        try lower.emit(if (size == 64) .fltd else .flts, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                    },
                    .lte => {
                        try lower.emit(if (size == 64) .fled else .fles, &.{
                            .{ .reg = rd },
                            .{ .reg = rs1 },
                            .{ .reg = rs2 },
                        });
                    },
                    // gt/gte swap the operands of flt/fle.
                    .gt => {
                        try lower.emit(if (size == 64) .fltd else .flts, &.{
                            .{ .reg = rd },
                            .{ .reg = rs2 },
                            .{ .reg = rs1 },
                        });
                    },
                    .gte => {
                        try lower.emit(if (size == 64) .fled else .fles, &.{
                            .{ .reg = rd },
                            .{ .reg = rs2 },
                            .{ .reg = rs1 },
                        });
                    },
                },
                .vector => return lower.fail("TODO: lowerMir pseudo_cmp vector", .{}),
            }
        },

        .pseudo_not => {
            const rr = inst.data.rr;
            assert(rr.rs.class() == .int and rr.rd.class() == .int);

            // mask out any other bits that aren't the boolean
            // NOTE: this clobbers the source register `rs` — callers appear to
            // rely only on `rd` afterwards.
            try lower.emit(.andi, &.{
                .{ .reg = rr.rs },
                .{ .reg = rr.rs },
                .{ .imm = Immediate.s(1) },
            });

            // rd = (rs < 1), i.e. rd = !rs for a 0/1 value.
            try lower.emit(.sltiu, &.{
                .{ .reg = rr.rd },
                .{ .reg = rr.rs },
                .{ .imm = Immediate.s(1) },
            });
        },

        .pseudo_extern_fn_reloc => {
            const inst_reloc = inst.data.reloc;
            const link_reg = inst_reloc.register;

            // auipc + jalr pair patched by the extern-fn reloc.
            try lower.emit(.auipc, &.{
                .{ .reg = link_reg },
                .{ .imm = lower.reloc(
                    .{ .call_extern_fn_reloc = .{
                        .atom_index = inst_reloc.atom_index,
                        .sym_index = inst_reloc.sym_index,
                    } },
                ) },
            });

            try lower.emit(.jalr, &.{
                .{ .reg = link_reg },
                .{ .reg = link_reg },
                .{ .imm = Immediate.s(0) },
            });
        },
    }

    return .{
        .insts = lower.result_insts[0..lower.result_insts_len],
        .relocs = lower.result_relocs[0..lower.result_relocs_len],
    };
}
487
/// Lowers a non-pseudo MIR instruction directly: the MIR tag is already the
/// final mnemonic, so only the operands need to be extracted from the
/// instruction's data payload in the order the encoder expects.
fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
    const mnemonic = inst.tag;
    try lower.emit(mnemonic, switch (inst.data) {
        .none => &.{},
        .u_type => |u| &.{
            .{ .reg = u.rd },
            .{ .imm = u.imm20 },
        },
        .i_type => |i| &.{
            .{ .reg = i.rd },
            .{ .reg = i.rs1 },
            .{ .imm = i.imm12 },
        },
        .rr => |rr| &.{
            .{ .reg = rr.rd },
            .{ .reg = rr.rs },
        },
        // Branches: the immediate is a relocation resolved to the target
        // MIR instruction later.
        .b_type => |b| &.{
            .{ .reg = b.rs1 },
            .{ .reg = b.rs2 },
            .{ .imm = lower.reloc(.{ .inst = b.inst }) },
        },
        .r_type => |r| &.{
            .{ .reg = r.rd },
            .{ .reg = r.rs1 },
            .{ .reg = r.rs2 },
        },
        // NOTE(review): operand order here is (csr, rs1, rd), unlike the
        // rd-first order of the other forms — presumably what
        // `encoding.Lir` expects for CSR instructions; confirm against the
        // encoder.
        .csr => |csr| &.{
            .{ .csr = csr.csr },
            .{ .reg = csr.rs1 },
            .{ .reg = csr.rd },
        },
        .amo => |amo| &.{
            .{ .reg = amo.rd },
            .{ .reg = amo.rs1 },
            .{ .reg = amo.rs2 },
            .{ .barrier = amo.rl },
            .{ .barrier = amo.aq },
        },
        .fence => |fence| &.{
            .{ .barrier = fence.succ },
            .{ .barrier = fence.pred },
        },
        else => return lower.fail("TODO: generic lower {s}", .{@tagName(inst.data)}),
    });
}
534
/// Encodes `mnemonic` with `ops` and appends the result to the scratch
/// instruction buffer.
fn emit(lower: *Lower, mnemonic: Mnemonic, ops: []const Instruction.Operand) !void {
    const slot = lower.result_insts_len;
    lower.result_insts[slot] = Instruction.fromLir(encoding.Lir.fromMnem(mnemonic), ops);
    lower.result_insts_len = slot + 1;
}
542
/// Records a relocation against the *next* instruction to be emitted and
/// returns a zero placeholder immediate for its operand slot.
fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
    const slot = lower.result_relocs_len;
    lower.result_relocs[slot] = .{
        .lowered_inst_index = lower.result_insts_len,
        .target = target,
    };
    lower.result_relocs_len = slot + 1;
    return Immediate.s(0);
}
551
/// Emits the spill (`spilling == true`) or restore (`spilling == false`)
/// sequence for every register in `reg_list`, one 8-byte slot per register
/// in the spill frame.
fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.RegisterList) !void {
    // The spill frame location is the same for every register; hoist the
    // lookup out of the loop (it was previously repeated per iteration).
    const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame));

    var it = reg_list.iterator(.{ .direction = .forward });
    var disp: u31 = 0;
    while (it.next()) |i| {
        const reg = abi.Registers.all_preserved[i];

        const load_inst: Mnemonic, const store_inst: Mnemonic = switch (reg.class()) {
            .int => .{ .ld, .sd },
            .float => .{ .fld, .fsd },
            // Vector registers never appear in the preserved set.
            .vector => unreachable,
        };

        if (spilling) {
            try lower.emit(store_inst, &.{
                .{ .reg = frame.base },
                .{ .reg = reg },
                .{ .imm = Immediate.s(frame.disp + disp) },
            });
        } else {
            try lower.emit(load_inst, &.{
                .{ .reg = reg },
                .{ .reg = frame.base },
                .{ .imm = Immediate.s(frame.disp + disp) },
            });
        }

        // Each preserved register occupies one 8-byte slot.
        disp += 8;
    }
}
584
/// Records an error message for this lowering and returns `error.LowerFail`.
/// May only be called once per `Lower` (asserts no message is already set);
/// the caller reads the message from `lower.err_msg`.
pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
    @branchHint(.cold);
    assert(lower.err_msg == null);
    lower.err_msg = try ErrorMsg.create(lower.allocator, lower.src_loc, format, args);
    return error.LowerFail;
}
591
/// Returns whether the compilation target's CPU has the given RISC-V feature
/// (e.g. `.d` for double-precision floats).
fn hasFeature(lower: *Lower, feature: std.Target.riscv.Feature) bool {
    return lower.pt.zcu.getTarget().cpu.has(.riscv, feature);
}
595
596const Lower = @This();
597const std = @import("std");
598const assert = std.debug.assert;
599const log = std.log.scoped(.lower);
600
601const Allocator = std.mem.Allocator;
602const ErrorMsg = Zcu.ErrorMsg;
603
604const link = @import("../../link.zig");
605const Air = @import("../../Air.zig");
606const Zcu = @import("../../Zcu.zig");
607
608const Mir = @import("Mir.zig");
609const abi = @import("abi.zig");
610const bits = @import("bits.zig");
611const encoding = @import("encoding.zig");
612
613const Mnemonic = @import("mnem.zig").Mnemonic;
614const Immediate = bits.Immediate;
615const Instruction = encoding.Instruction;