1//! This file contains the functionality for lowering SPARCv9 MIR into
2//! machine code
3
4const std = @import("std");
5const Endian = std.builtin.Endian;
6const assert = std.debug.assert;
7const link = @import("../../link.zig");
8const Zcu = @import("../../Zcu.zig");
9const ErrorMsg = Zcu.ErrorMsg;
10const log = std.log.scoped(.sparcv9_emit);
11
12const Emit = @This();
13const Mir = @import("Mir.zig");
14const bits = @import("bits.zig");
15const Instruction = bits.Instruction;
16const Register = bits.Register;
17
18mir: Mir,
19bin_file: *link.File,
20debug_output: link.File.DebugInfoOutput,
21target: *const std.Target,
22err_msg: ?*ErrorMsg = null,
23src_loc: Zcu.LazySrcLoc,
24w: *std.Io.Writer,
25
26prev_di_line: u32,
27prev_di_column: u32,
28/// Relative to the beginning of `code`.
29prev_di_pc: usize,
30
31/// The branch type of every branch
32branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty,
33/// For every forward branch, maps the target instruction to a list of
34/// branches which branch to this target instruction
35branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayList(Mir.Inst.Index)) = .empty,
36/// For backward branches: stores the code offset of the target
37/// instruction
38///
39/// For forward branches: stores the code offset of the branch
40/// instruction
41code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
42
/// Errors that can occur while lowering MIR into machine code.
/// `error.EmitFail` is always accompanied by a populated `err_msg`
/// (see `fail`); writer errors come from the underlying output stream.
const InnerError = std.Io.Writer.Error || error{
    OutOfMemory,
    EmitFail,
};
47
/// The lowering variant currently selected for a branch instruction.
/// `lowerBranches` may revise this choice as code offsets are refined.
const BranchType = enum {
    bpcc,
    bpr,

    /// The branch type initially assumed for a given branch MIR tag,
    /// before any offset-based adjustment in `lowerBranches`.
    fn default(tag: Mir.Inst.Tag) BranchType {
        return switch (tag) {
            .bpcc => .bpcc,
            .bpr => .bpr,
            else => unreachable, // not a branch tag (see `isBranch`)
        };
    }
};
59
/// Lowers all MIR instructions into machine code, writing the result to
/// `emit.w`. Branches are first resolved to concrete offsets by
/// `lowerBranches`, then every instruction is dispatched to the handler
/// matching its data layout.
pub fn emitMir(
    emit: *Emit,
) InnerError!void {
    const mir_tags = emit.mir.instructions.items(.tag);

    // Convert absolute addresses into offsets and
    // find smallest lowerings for branch instructions
    try emit.lowerBranches();

    // Emit machine code, grouping tags by the handler they share.
    for (mir_tags, 0..) |tag, index| {
        const inst: u32 = @intCast(index);
        switch (tag) {
            .dbg_line => try emit.mirDbgLine(inst),
            .dbg_prologue_end => try emit.mirDebugPrologueEnd(),
            .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),

            .bpr, .bpcc => try emit.mirConditionalBranch(inst),

            .call => @panic("TODO implement sparc64 call"),

            .lduba,
            .lduha,
            .lduwa,
            .ldxa,
            .stba,
            .stha,
            .stwa,
            .stxa,
            => try emit.mirMemASI(inst),

            .membar => try emit.mirMembar(inst),

            .movcc, .movr => try emit.mirConditionalMove(inst),

            .nop => try emit.mirNop(),

            .sethi => try emit.mirSethi(inst),

            .sll,
            .srl,
            .sra,
            .sllx,
            .srlx,
            .srax,
            => try emit.mirShift(inst),

            .tcc => try emit.mirTrap(inst),

            .@"return", .cmp, .mov, .not => try emit.mirArithmetic2Op(inst),

            .add,
            .addcc,
            .jmpl,
            .ldub,
            .lduh,
            .lduw,
            .ldx,
            .@"and",
            .@"or",
            .xor,
            .xnor,
            .mulx,
            .sdivx,
            .udivx,
            .save,
            .restore,
            .stb,
            .sth,
            .stw,
            .stx,
            .sub,
            .subcc,
            => try emit.mirArithmetic3Op(inst),
        }
    }
}
151
/// Frees all auxiliary branch-lowering state. The `Emit` instance must
/// not be used afterwards.
pub fn deinit(emit: *Emit) void {
    const comp = emit.bin_file.comp;
    const gpa = comp.gpa;
    // Each forward-branch target owns a list of branch origins; free the
    // lists before freeing the map that holds them.
    var iter = emit.branch_forward_origins.valueIterator();
    while (iter.next()) |origin_list| {
        origin_list.deinit(gpa);
    }

    emit.branch_types.deinit(gpa);
    emit.branch_forward_origins.deinit(gpa);
    emit.code_offset_mapping.deinit(gpa);
    emit.* = undefined;
}
165
/// Lowers a `.dbg_line` pseudo-instruction by advancing the tracked
/// debug-info position; emits no machine code.
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
    assert(emit.mir.instructions.items(.tag)[inst] == .dbg_line);

    const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column;
    try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column);
}
175
/// Marks the end of the function prologue in the DWARF line program,
/// then re-syncs the line table with the current position so the flag
/// is attached to the right PC. No-op when not emitting DWARF.
fn mirDebugPrologueEnd(emit: *Emit) !void {
    switch (emit.debug_output) {
        .dwarf => |dbg_out| {
            try dbg_out.setPrologueEnd();
            try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
        },
        .none => {},
    }
}
185
/// Marks the beginning of the function epilogue in the DWARF line
/// program, then re-syncs the line table with the current position.
/// No-op when not emitting DWARF.
fn mirDebugEpilogueBegin(emit: *Emit) !void {
    switch (emit.debug_output) {
        .dwarf => |dbg_out| {
            try dbg_out.setEpilogueBegin();
            try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
        },
        .none => {},
    }
}
195
/// Lowers two-operand (pseudo-)instructions with either a register or a
/// 13-bit immediate second operand.
///
/// Note the pseudo-instruction encodings below:
/// - `cmp` is `subcc` with the result discarded into %g0;
/// - `mov` and `not` use `rs1` as the *destination* and are encoded as
///   `or`/`xnor` against %g0.
fn mirArithmetic2Op(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst].arithmetic_2op;

    const rs1 = data.rs1;

    if (data.is_imm) {
        const imm = data.rs2_or_imm.imm;
        switch (tag) {
            .@"return" => try emit.writeInstruction(Instruction.@"return"(i13, rs1, imm)),
            .cmp => try emit.writeInstruction(Instruction.subcc(i13, rs1, imm, .g0)),
            .mov => try emit.writeInstruction(Instruction.@"or"(i13, .g0, imm, rs1)),
            .not => try emit.writeInstruction(Instruction.xnor(i13, .g0, imm, rs1)),
            else => unreachable,
        }
    } else {
        const rs2 = data.rs2_or_imm.rs2;
        switch (tag) {
            .@"return" => try emit.writeInstruction(Instruction.@"return"(Register, rs1, rs2)),
            .cmp => try emit.writeInstruction(Instruction.subcc(Register, rs1, rs2, .g0)),
            .mov => try emit.writeInstruction(Instruction.@"or"(Register, .g0, rs2, rs1)),
            .not => try emit.writeInstruction(Instruction.xnor(Register, rs2, .g0, rs1)),
            else => unreachable,
        }
    }
}
222
/// Lowers three-operand instructions of the form `op rs1, rs2_or_imm, rd`.
/// The second operand is either a register or a sign-extended 13-bit
/// immediate, selected by `data.is_imm`; both switches below must stay
/// in sync arm-for-arm.
fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst].arithmetic_3op;

    const rd = data.rd;
    const rs1 = data.rs1;

    if (data.is_imm) {
        const imm = data.rs2_or_imm.imm;
        switch (tag) {
            .add => try emit.writeInstruction(Instruction.add(i13, rs1, imm, rd)),
            .addcc => try emit.writeInstruction(Instruction.addcc(i13, rs1, imm, rd)),
            .jmpl => try emit.writeInstruction(Instruction.jmpl(i13, rs1, imm, rd)),
            .ldub => try emit.writeInstruction(Instruction.ldub(i13, rs1, imm, rd)),
            .lduh => try emit.writeInstruction(Instruction.lduh(i13, rs1, imm, rd)),
            .lduw => try emit.writeInstruction(Instruction.lduw(i13, rs1, imm, rd)),
            .ldx => try emit.writeInstruction(Instruction.ldx(i13, rs1, imm, rd)),
            .@"and" => try emit.writeInstruction(Instruction.@"and"(i13, rs1, imm, rd)),
            .@"or" => try emit.writeInstruction(Instruction.@"or"(i13, rs1, imm, rd)),
            .xor => try emit.writeInstruction(Instruction.xor(i13, rs1, imm, rd)),
            .xnor => try emit.writeInstruction(Instruction.xnor(i13, rs1, imm, rd)),
            .mulx => try emit.writeInstruction(Instruction.mulx(i13, rs1, imm, rd)),
            .sdivx => try emit.writeInstruction(Instruction.sdivx(i13, rs1, imm, rd)),
            .udivx => try emit.writeInstruction(Instruction.udivx(i13, rs1, imm, rd)),
            .save => try emit.writeInstruction(Instruction.save(i13, rs1, imm, rd)),
            .restore => try emit.writeInstruction(Instruction.restore(i13, rs1, imm, rd)),
            .stb => try emit.writeInstruction(Instruction.stb(i13, rs1, imm, rd)),
            .sth => try emit.writeInstruction(Instruction.sth(i13, rs1, imm, rd)),
            .stw => try emit.writeInstruction(Instruction.stw(i13, rs1, imm, rd)),
            .stx => try emit.writeInstruction(Instruction.stx(i13, rs1, imm, rd)),
            .sub => try emit.writeInstruction(Instruction.sub(i13, rs1, imm, rd)),
            .subcc => try emit.writeInstruction(Instruction.subcc(i13, rs1, imm, rd)),
            else => unreachable,
        }
    } else {
        const rs2 = data.rs2_or_imm.rs2;
        switch (tag) {
            .add => try emit.writeInstruction(Instruction.add(Register, rs1, rs2, rd)),
            .addcc => try emit.writeInstruction(Instruction.addcc(Register, rs1, rs2, rd)),
            .jmpl => try emit.writeInstruction(Instruction.jmpl(Register, rs1, rs2, rd)),
            .ldub => try emit.writeInstruction(Instruction.ldub(Register, rs1, rs2, rd)),
            .lduh => try emit.writeInstruction(Instruction.lduh(Register, rs1, rs2, rd)),
            .lduw => try emit.writeInstruction(Instruction.lduw(Register, rs1, rs2, rd)),
            .ldx => try emit.writeInstruction(Instruction.ldx(Register, rs1, rs2, rd)),
            .@"and" => try emit.writeInstruction(Instruction.@"and"(Register, rs1, rs2, rd)),
            .@"or" => try emit.writeInstruction(Instruction.@"or"(Register, rs1, rs2, rd)),
            .xor => try emit.writeInstruction(Instruction.xor(Register, rs1, rs2, rd)),
            .xnor => try emit.writeInstruction(Instruction.xnor(Register, rs1, rs2, rd)),
            .mulx => try emit.writeInstruction(Instruction.mulx(Register, rs1, rs2, rd)),
            .sdivx => try emit.writeInstruction(Instruction.sdivx(Register, rs1, rs2, rd)),
            .udivx => try emit.writeInstruction(Instruction.udivx(Register, rs1, rs2, rd)),
            .save => try emit.writeInstruction(Instruction.save(Register, rs1, rs2, rd)),
            .restore => try emit.writeInstruction(Instruction.restore(Register, rs1, rs2, rd)),
            .stb => try emit.writeInstruction(Instruction.stb(Register, rs1, rs2, rd)),
            .sth => try emit.writeInstruction(Instruction.sth(Register, rs1, rs2, rd)),
            .stw => try emit.writeInstruction(Instruction.stw(Register, rs1, rs2, rd)),
            .stx => try emit.writeInstruction(Instruction.stx(Register, rs1, rs2, rd)),
            .sub => try emit.writeInstruction(Instruction.sub(Register, rs1, rs2, rd)),
            .subcc => try emit.writeInstruction(Instruction.subcc(Register, rs1, rs2, rd)),
            else => unreachable,
        }
    }
}
286
/// Lowers a conditional branch using the branch type selected by
/// `lowerBranches` and the resolved code offsets.
fn mirConditionalBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const branch_type = emit.branch_types.get(inst).?;

    // Byte displacement from the current write position to the target's
    // resolved code offset.
    const target_offset = emit.code_offset_mapping.get(emit.branchTarget(inst)).?;
    const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(emit.w.end));
    log.debug("mirConditionalBranch: {} offset={}", .{ inst, offset });

    switch (branch_type) {
        .bpcc => {
            assert(tag == .bpcc);
            const branch_predict_int = emit.mir.instructions.items(.data)[inst].branch_predict_int;
            try emit.writeInstruction(Instruction.bpcc(
                branch_predict_int.cond,
                branch_predict_int.annul,
                branch_predict_int.pt,
                branch_predict_int.ccr,
                @as(i21, @intCast(offset)),
            ));
        },
        .bpr => {
            assert(tag == .bpr);
            const branch_predict_reg = emit.mir.instructions.items(.data)[inst].branch_predict_reg;
            try emit.writeInstruction(Instruction.bpr(
                branch_predict_reg.cond,
                branch_predict_reg.annul,
                branch_predict_reg.pt,
                branch_predict_reg.rs1,
                @as(i18, @intCast(offset)),
            ));
        },
    }
}
330
/// Lowers conditional moves:
/// - `movcc` moves on an integer condition code (immediate form is i11);
/// - `movr` moves on a register-contents condition (immediate form is i10).
fn mirConditionalMove(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];

    switch (tag) {
        .movcc => {
            const data = emit.mir.instructions.items(.data)[inst].conditional_move_int;
            if (data.is_imm) {
                try emit.writeInstruction(Instruction.movcc(
                    i11,
                    data.cond,
                    data.ccr,
                    data.rs2_or_imm.imm,
                    data.rd,
                ));
            } else {
                try emit.writeInstruction(Instruction.movcc(
                    Register,
                    data.cond,
                    data.ccr,
                    data.rs2_or_imm.rs2,
                    data.rd,
                ));
            }
        },
        .movr => {
            const data = emit.mir.instructions.items(.data)[inst].conditional_move_reg;
            if (data.is_imm) {
                try emit.writeInstruction(Instruction.movr(
                    i10,
                    data.cond,
                    data.rs1,
                    data.rs2_or_imm.imm,
                    data.rd,
                ));
            } else {
                try emit.writeInstruction(Instruction.movr(
                    Register,
                    data.cond,
                    data.rs1,
                    data.rs2_or_imm.rs2,
                    data.rd,
                ));
            }
        },
        else => unreachable,
    }
}
378
/// Lowers loads/stores that carry an explicit Address Space Identifier.
/// These only come in register+register addressing form.
fn mirMemASI(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst].mem_asi;

    switch (tag) {
        .lduba => try emit.writeInstruction(Instruction.lduba(data.rs1, data.rs2, data.asi, data.rd)),
        .lduha => try emit.writeInstruction(Instruction.lduha(data.rs1, data.rs2, data.asi, data.rd)),
        .lduwa => try emit.writeInstruction(Instruction.lduwa(data.rs1, data.rs2, data.asi, data.rd)),
        .ldxa => try emit.writeInstruction(Instruction.ldxa(data.rs1, data.rs2, data.asi, data.rd)),

        .stba => try emit.writeInstruction(Instruction.stba(data.rs1, data.rs2, data.asi, data.rd)),
        .stha => try emit.writeInstruction(Instruction.stha(data.rs1, data.rs2, data.asi, data.rd)),
        .stwa => try emit.writeInstruction(Instruction.stwa(data.rs1, data.rs2, data.asi, data.rd)),
        .stxa => try emit.writeInstruction(Instruction.stxa(data.rs1, data.rs2, data.asi, data.rd)),
        else => unreachable,
    }
}
401
/// Emits a MEMBAR instruction with the given cmask/mmask fields.
fn mirMembar(emit: *Emit, inst: Mir.Inst.Index) !void {
    assert(emit.mir.instructions.items(.tag)[inst] == .membar);

    const mask = emit.mir.instructions.items(.data)[inst].membar_mask;
    try emit.writeInstruction(Instruction.membar(mask.cmask, mask.mmask));
}
412
/// Emits a single NOP instruction.
fn mirNop(emit: *Emit) !void {
    try emit.writeInstruction(Instruction.nop());
}
416
/// Emits a SETHI instruction with the immediate and destination register
/// taken directly from the MIR data.
fn mirSethi(emit: *Emit, inst: Mir.Inst.Index) !void {
    assert(emit.mir.instructions.items(.tag)[inst] == .sethi);

    const data = emit.mir.instructions.items(.data)[inst].sethi;
    try emit.writeInstruction(Instruction.sethi(data.imm, data.rd));
}
427
/// Lowers shift instructions. The 32-bit variants (sll/srl/sra) take a
/// 5-bit shift amount, so the immediate is truncated to u5; the 64-bit
/// variants (sllx/srlx/srax) use the full 6-bit immediate as-is.
fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst].shift;

    const rd = data.rd;
    const rs1 = data.rs1;

    if (data.is_imm) {
        const imm = data.rs2_or_imm.imm;
        switch (tag) {
            .sll => try emit.writeInstruction(Instruction.sll(u5, rs1, @as(u5, @truncate(imm)), rd)),
            .srl => try emit.writeInstruction(Instruction.srl(u5, rs1, @as(u5, @truncate(imm)), rd)),
            .sra => try emit.writeInstruction(Instruction.sra(u5, rs1, @as(u5, @truncate(imm)), rd)),
            .sllx => try emit.writeInstruction(Instruction.sllx(u6, rs1, imm, rd)),
            .srlx => try emit.writeInstruction(Instruction.srlx(u6, rs1, imm, rd)),
            .srax => try emit.writeInstruction(Instruction.srax(u6, rs1, imm, rd)),
            else => unreachable,
        }
    } else {
        const rs2 = data.rs2_or_imm.rs2;
        switch (tag) {
            .sll => try emit.writeInstruction(Instruction.sll(Register, rs1, rs2, rd)),
            .srl => try emit.writeInstruction(Instruction.srl(Register, rs1, rs2, rd)),
            .sra => try emit.writeInstruction(Instruction.sra(Register, rs1, rs2, rd)),
            .sllx => try emit.writeInstruction(Instruction.sllx(Register, rs1, rs2, rd)),
            .srlx => try emit.writeInstruction(Instruction.srlx(Register, rs1, rs2, rd)),
            .srax => try emit.writeInstruction(Instruction.srax(Register, rs1, rs2, rd)),
            else => unreachable,
        }
    }
}
459
/// Lowers a conditional trap (`tcc`), with either a register or a 7-bit
/// immediate trap number.
fn mirTrap(emit: *Emit, inst: Mir.Inst.Index) !void {
    const tag = emit.mir.instructions.items(.tag)[inst];
    const data = emit.mir.instructions.items(.data)[inst].trap;

    const cond = data.cond;
    const ccr = data.ccr;
    const rs1 = data.rs1;

    if (data.is_imm) {
        const imm = data.rs2_or_imm.imm;
        switch (tag) {
            .tcc => try emit.writeInstruction(Instruction.trap(u7, cond, ccr, rs1, imm)),
            else => unreachable,
        }
    } else {
        const rs2 = data.rs2_or_imm.rs2;
        switch (tag) {
            .tcc => try emit.writeInstruction(Instruction.trap(Register, cond, ccr, rs1, rs2)),
            else => unreachable,
        }
    }
}
482
483// Common helper functions
484
/// Returns the MIR index of the instruction a branch jumps to.
/// Asserts that `inst` is a branch (see `isBranch`).
fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
    const data = emit.mir.instructions.items(.data)[inst];
    return switch (emit.mir.instructions.items(.tag)[inst]) {
        .bpcc => data.branch_predict_int.inst,
        .bpr => data.branch_predict_reg.inst,
        else => unreachable, // not a branch
    };
}
494
/// Advances the DWARF line program to the current code offset and the
/// given source line/column, then records them as the new previous
/// position. No-op unless emitting DWARF.
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
    const dbg_out = switch (emit.debug_output) {
        .dwarf => |dwarf| dwarf,
        else => return,
    };

    const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
    const delta_pc: usize = emit.w.end - emit.prev_di_pc;
    try dbg_out.advancePCAndLine(delta_line, delta_pc);

    emit.prev_di_line = line;
    emit.prev_di_column = column;
    emit.prev_di_pc = emit.w.end;
}
508
/// Records an error message for the current source location and returns
/// `error.EmitFail`. Asserts that no message has been recorded yet, so
/// this may be called at most once per emit session.
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
    @branchHint(.cold);
    assert(emit.err_msg == null);
    const comp = emit.bin_file.comp;
    const gpa = comp.gpa;
    emit.err_msg = try ErrorMsg.create(gpa, emit.src_loc, format, args);
    return error.EmitFail;
}
517
/// Returns the number of machine-code bytes a MIR instruction lowers to.
fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
    return switch (emit.mir.instructions.items(.tag)[inst]) {
        // Debug pseudo-instructions produce no machine code.
        .dbg_line,
        .dbg_epilogue_begin,
        .dbg_prologue_end,
        => 0,
        // Currently Mir instructions always map to single machine instruction.
        else => 4,
    };
}
530
/// Whether this MIR tag is a branch that participates in offset lowering.
fn isBranch(tag: Mir.Inst.Tag) bool {
    return switch (tag) {
        .bpcc, .bpr => true,
        else => false,
    };
}
538
/// Resolves branch targets to concrete code offsets and selects a
/// lowering (`BranchType`) for every branch, iterating to a fixed point
/// since changing one branch's size can move every later offset.
fn lowerBranches(emit: *Emit) !void {
    const comp = emit.bin_file.comp;
    const gpa = comp.gpa;
    const mir_tags = emit.mir.instructions.items(.tag);

    // First pass: Note down all branches and their target
    // instructions, i.e. populate branch_types,
    // branch_forward_origins, and code_offset_mapping
    //
    // TODO optimization opportunity: do this in codegen while
    // generating MIR
    for (mir_tags, 0..) |tag, index| {
        const inst = @as(u32, @intCast(index));
        if (isBranch(tag)) {
            const target_inst = emit.branchTarget(inst);

            // Remember this branch instruction
            try emit.branch_types.put(gpa, inst, BranchType.default(tag));

            // Forward branches require some extra stuff: We only
            // know their offset once we arrive at the target
            // instruction. Therefore, we need to be able to
            // access the branch instruction when we visit the
            // target instruction in order to manipulate its type
            // etc.
            if (target_inst > inst) {
                // Remember the branch instruction index
                try emit.code_offset_mapping.put(gpa, inst, 0);

                if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
                    try origin_list.append(gpa, inst);
                } else {
                    var origin_list: std.ArrayList(Mir.Inst.Index) = .empty;
                    try origin_list.append(gpa, inst);
                    try emit.branch_forward_origins.put(gpa, target_inst, origin_list);
                }
            }

            // Remember the target instruction index so that we
            // update the real code offset in all future passes
            //
            // putNoClobber may not be used as the put operation
            // may clobber the entry when multiple branches branch
            // to the same target instruction
            try emit.code_offset_mapping.put(gpa, target_inst, 0);
        }
    }

    // Further passes: Until all branches are lowered, interate
    // through all instructions and calculate new offsets and
    // potentially new branch types
    var all_branches_lowered = false;
    while (!all_branches_lowered) {
        all_branches_lowered = true;
        var current_code_offset: usize = 0;

        for (mir_tags, 0..) |tag, index| {
            const inst = @as(u32, @intCast(index));

            // If this instruction contained in the code offset
            // mapping (when it is a target of a branch or if it is a
            // forward branch), update the code offset
            if (emit.code_offset_mapping.getPtr(inst)) |offset| {
                offset.* = current_code_offset;
            }

            // If this instruction is a backward branch, calculate the
            // offset, which may potentially update the branch type
            if (isBranch(tag)) {
                const target_inst = emit.branchTarget(inst);
                if (target_inst < inst) {
                    const target_offset = emit.code_offset_mapping.get(target_inst).?;
                    const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset));
                    const branch_type = emit.branch_types.getPtr(inst).?;
                    const optimal_branch_type = try emit.optimalBranchType(tag, offset);
                    if (branch_type.* != optimal_branch_type) {
                        branch_type.* = optimal_branch_type;
                        // A branch changed size; offsets after it are
                        // stale, so run another pass.
                        all_branches_lowered = false;
                    }

                    log.debug("lowerBranches: branch {} has offset {}", .{ inst, offset });
                }
            }

            // If this instruction is the target of one or more
            // forward branches, calculate the offset, which may
            // potentially update the branch type
            if (emit.branch_forward_origins.get(inst)) |origin_list| {
                for (origin_list.items) |forward_branch_inst| {
                    const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
                    const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
                    const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset));
                    const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
                    const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
                    if (branch_type.* != optimal_branch_type) {
                        branch_type.* = optimal_branch_type;
                        all_branches_lowered = false;
                    }

                    log.debug("lowerBranches: branch {} has offset {}", .{ forward_branch_inst, offset });
                }
            }

            // Increment code offset
            current_code_offset += emit.instructionSize(inst);
        }
    }
}
647
/// Returns the smallest branch lowering that can encode `offset`, or
/// fails with a TODO message when no supported encoding is wide enough.
fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
    // Branch offsets are measured in bytes but must be instruction-aligned.
    assert(offset & 0b11 == 0);

    // TODO use the following strategy to implement long branches:
    // - Negate the conditional and target of the original instruction;
    // - In the space immediately after the branch, load
    //   the address of the original target, preferrably in
    //   a PC-relative way, into %o7; and
    // - jmpl %o7 + %g0, %g0
    switch (tag) {
        .bpcc => {
            // BPcc byte displacements must fit in an i21 (+-1 MiB).
            if (std.math.cast(i21, offset) == null)
                return emit.fail("TODO support BPcc branches larger than +-1 MiB", .{});
            return .bpcc;
        },
        .bpr => {
            // BPr byte displacements must fit in an i18 (+-128 KiB).
            if (std.math.cast(i18, offset) == null)
                return emit.fail("TODO support BPr branches larger than +-128 KiB", .{});
            return .bpr;
        },
        else => unreachable,
    }
}
676
/// Appends one 32-bit machine instruction to the output writer.
///
/// SPARCv9 instructions are always arranged in BE regardless of the
/// endianness mode the CPU is running in (Section 3.1 of the ISA
/// specification). This is to ease porting in case someone wants to do
/// a LE SPARCv9 backend.
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
    const word = instruction.toU32();
    try emit.w.writeInt(u32, word, .big);
}