Commit dceff2592f
Changed files (4)
lib
std
src
arch
riscv64
lib/std/builtin.zig
@@ -759,6 +759,13 @@ else
pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr: ?usize) noreturn {
@setCold(true);
+ // stage2_riscv64 backend doesn't support loops yet.
+ if (builtin.zig_backend == .stage2_riscv64 or
+ builtin.cpu.arch == .riscv64)
+ {
+ unreachable;
+ }
+
// For backends that cannot handle the language features depended on by the
// default panic handler, we have a simpler panic handler:
if (builtin.zig_backend == .stage2_wasm or
@@ -766,7 +773,6 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
builtin.zig_backend == .stage2_aarch64 or
builtin.zig_backend == .stage2_x86 or
(builtin.zig_backend == .stage2_x86_64 and (builtin.target.ofmt != .elf and builtin.target.ofmt != .macho)) or
- builtin.zig_backend == .stage2_riscv64 or
builtin.zig_backend == .stage2_sparc64 or
builtin.zig_backend == .stage2_spirv64)
{
src/arch/riscv64/CodeGen.zig
@@ -33,7 +33,6 @@ const abi = @import("abi.zig");
const Register = bits.Register;
const RegisterManager = abi.RegisterManager;
const RegisterLock = RegisterManager.RegisterLock;
-const Instruction = abi.Instruction;
const callee_preserved_regs = abi.callee_preserved_regs;
const gp = abi.RegisterClass.gp;
@@ -96,6 +95,8 @@ air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init,
const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {};
+const SymbolOffset = struct { sym: u32, off: i32 = 0 };
+
const MCValue = union(enum) {
/// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc.
/// TODO Look into deleting this tag and using `dead` instead, since every use
@@ -110,6 +111,9 @@ const MCValue = union(enum) {
/// A pointer-sized integer that fits in a register.
/// If the type is a pointer, this is the pointer address in virtual address space.
immediate: u64,
+ /// The value is in memory at an address not-yet-allocated by the linker.
+ /// This traditionally corresponds to a relocation emitted in a relocatable object file.
+ load_symbol: SymbolOffset,
/// The value is in a target-specific register.
register: Register,
/// The value is in memory at a hard-coded address.
@@ -145,6 +149,7 @@ const MCValue = union(enum) {
.memory,
.ptr_stack_offset,
.undef,
+ .load_symbol,
=> false,
.register,
@@ -165,12 +170,12 @@ const Branch = struct {
const StackAllocation = struct {
inst: Air.Inst.Index,
- /// TODO do we need size? should be determined by inst.ty.abiSize()
+ /// TODO: infer the size from the type of the inst instead of storing it
size: u32,
};
const BlockData = struct {
- relocs: std.ArrayListUnmanaged(Reloc),
+ relocs: std.ArrayListUnmanaged(Mir.Inst.Index),
/// The first break instruction encounters `null` here and chooses a
/// machine code value for the block result, populating this field.
/// Following break instructions encounter that value and use it for
@@ -178,18 +183,6 @@ const BlockData = struct {
mcv: MCValue,
};
-const Reloc = union(enum) {
- /// The value is an offset into the `Function` `code` from the beginning.
- /// To perform the reloc, write 32-bit signed little-endian integer
- /// which is a relative jump, based on the address following the reloc.
- rel32: usize,
- /// A branch in the ARM instruction set
- arm_branch: struct {
- pos: usize,
- cond: @import("../arm/bits.zig").Condition,
- },
-};
-
const BigTomb = struct {
function: *Self,
inst: Air.Inst.Index,
@@ -272,6 +265,7 @@ pub fn generate(
},
else => |e| return e,
};
+
defer call_info.deinit(&function);
function.args = call_info.args;
@@ -328,6 +322,13 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
return result_index;
}
+fn addNop(self: *Self) error{OutOfMemory}!Mir.Inst.Index {
+ return try self.addInst(.{
+ .tag = .nop,
+ .data = .{ .nop = {} },
+ });
+}
+
pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
const fields = std.meta.fields(@TypeOf(extra));
try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len);
@@ -350,115 +351,45 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
fn gen(self: *Self) !void {
const mod = self.bin_file.comp.module.?;
const cc = self.fn_type.fnCallingConvention(mod);
- if (cc != .Naked) {
- // TODO Finish function prologue and epilogue for riscv64.
-
- // TODO Backpatch stack offset
- // addi sp, sp, -16
- _ = try self.addInst(.{
- .tag = .addi,
- .data = .{ .i_type = .{
- .rd = .sp,
- .rs1 = .sp,
- .imm12 = -16,
- } },
- });
- // sd ra, 8(sp)
- _ = try self.addInst(.{
- .tag = .sd,
- .data = .{ .i_type = .{
- .rd = .ra,
- .rs1 = .sp,
- .imm12 = 8,
- } },
- });
-
- // sd s0, 0(sp)
- _ = try self.addInst(.{
- .tag = .sd,
- .data = .{ .i_type = .{
- .rd = .s0,
- .rs1 = .sp,
- .imm12 = 0,
- } },
- });
+ if (cc == .Naked) return self.fail("TODO: gen support for callconv(.{s})", .{@tagName(cc)});
- _ = try self.addInst(.{
- .tag = .dbg_prologue_end,
- .data = .{ .nop = {} },
- });
-
- try self.genBody(self.air.getMainBody());
-
- _ = try self.addInst(.{
- .tag = .dbg_epilogue_begin,
- .data = .{ .nop = {} },
- });
-
- // exitlude jumps
- if (self.exitlude_jump_relocs.items.len > 0 and
- self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2)
- {
- // If the last Mir instruction (apart from the
- // dbg_epilogue_begin) is the last exitlude jump
- // relocation (which would just jump one instruction
- // further), it can be safely removed
- self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop());
- }
-
- for (self.exitlude_jump_relocs.items) |jmp_reloc| {
- _ = jmp_reloc;
- return self.fail("TODO add branches in RISCV64", .{});
- }
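+ // The entire prologue is emitted as a single pseudo-instruction so that
+ // its stack-size operand can be backpatched once the total frame size is
+ // known (see the backpatch after genBody below).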
+ _ = try self.addInst(.{
+ .tag = .pseudo_prologue,
+ .data = .{ .imm12 = 0 }, // Backpatched later.
+ });
- // ld ra, 8(sp)
- _ = try self.addInst(.{
- .tag = .ld,
- .data = .{ .i_type = .{
- .rd = .ra,
- .rs1 = .sp,
- .imm12 = 8,
- } },
- });
+ _ = try self.addInst(.{
+ .tag = .dbg_prologue_end,
+ .data = .{ .nop = {} },
+ });
- // ld s0, 0(sp)
- _ = try self.addInst(.{
- .tag = .ld,
- .data = .{ .i_type = .{
- .rd = .s0,
- .rs1 = .sp,
- .imm12 = 0,
- } },
- });
+ try self.genBody(self.air.getMainBody());
- // addi sp, sp, 16
- _ = try self.addInst(.{
- .tag = .addi,
- .data = .{ .i_type = .{
- .rd = .sp,
- .rs1 = .sp,
- .imm12 = 16,
- } },
- });
+ // Backpatch prologue stack size
+ if (math.cast(i12, self.max_end_stack)) |casted_stack_size| {
+ self.mir_instructions.items(.data)[0].imm12 = casted_stack_size;
+ } else return self.fail("TODO support larger stack sizes, got {}", .{self.max_end_stack});
- // ret
- _ = try self.addInst(.{
- .tag = .ret,
- .data = .{ .nop = {} },
- });
- } else {
- _ = try self.addInst(.{
- .tag = .dbg_prologue_end,
- .data = .{ .nop = {} },
- });
+ _ = try self.addInst(.{
+ .tag = .dbg_epilogue_begin,
+ .data = .{ .nop = {} },
+ });
- try self.genBody(self.air.getMainBody());
+ // exitlude jumps
+ if (self.exitlude_jump_relocs.items.len > 0 and
+ self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2)
+ {
+ // If the last Mir instruction (apart from the
+ // dbg_epilogue_begin) is the last exitlude jump
+ // relocation (which would just jump one instruction
+ // further), it can be safely removed
+ self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop());
+ }
- _ = try self.addInst(.{
- .tag = .dbg_epilogue_begin,
- .data = .{ .nop = {} },
- });
+ for (self.exitlude_jump_relocs.items) |jmp_reloc| {
+ _ = jmp_reloc;
+ return self.fail("TODO add branches in RISCV64", .{});
}
// Drop them off at the rbrace.
@@ -535,12 +466,12 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst),
- .cmp_lt => try self.airCmp(inst, .lt),
- .cmp_lte => try self.airCmp(inst, .lte),
- .cmp_eq => try self.airCmp(inst, .eq),
- .cmp_gte => try self.airCmp(inst, .gte),
- .cmp_gt => try self.airCmp(inst, .gt),
- .cmp_neq => try self.airCmp(inst, .neq),
+ .cmp_lt => try self.airCmp(inst),
+ .cmp_lte => try self.airCmp(inst),
+ .cmp_eq => try self.airCmp(inst),
+ .cmp_gte => try self.airCmp(inst),
+ .cmp_gt => try self.airCmp(inst),
+ .cmp_neq => try self.airCmp(inst),
.cmp_vector => try self.airCmpVector(inst),
.cmp_lt_errors_len => try self.airCmpLtErrorsLen(inst),
@@ -565,6 +496,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.frame_addr => try self.airFrameAddress(inst),
.fence => try self.airFence(),
.cond_br => try self.airCondBr(inst),
+ .dbg_stmt => try self.airDbgStmt(inst),
.fptrunc => try self.airFptrunc(inst),
.fpext => try self.airFpext(inst),
.intcast => try self.airIntCast(inst),
@@ -617,17 +549,17 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.union_init => try self.airUnionInit(inst),
.prefetch => try self.airPrefetch(inst),
.mul_add => try self.airMulAdd(inst),
- .addrspace_cast => @panic("TODO"),
+ .addrspace_cast => return self.fail("TODO: addrspace_cast", .{}),
- .@"try" => @panic("TODO"),
- .try_ptr => @panic("TODO"),
+ .@"try" => return self.fail("TODO: try", .{}),
+ .try_ptr => return self.fail("TODO: try_ptr", .{}),
- .dbg_stmt => try self.airDbgStmt(inst),
- .dbg_inline_block => try self.airDbgInlineBlock(inst),
.dbg_var_ptr,
.dbg_var_val,
=> try self.airDbgVar(inst),
+ .dbg_inline_block => try self.airDbgInlineBlock(inst),
+
.call => try self.airCall(inst, .auto),
.call_always_tail => try self.airCall(inst, .always_tail),
.call_never_tail => try self.airCall(inst, .never_tail),
@@ -1019,17 +951,20 @@ fn binOpRegister(
const mir_tag: Mir.Inst.Tag = switch (tag) {
.add => .add,
.sub => .sub,
- else => unreachable,
+ .cmp_eq => .cmp_eq,
+ .cmp_gt => .cmp_gt,
+ else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
};
const mir_data: Mir.Inst.Data = switch (tag) {
.add,
.sub,
+ .cmp_eq,
=> .{ .r_type = .{
.rd = dest_reg,
.rs1 = lhs_reg,
.rs2 = rhs_reg,
} },
- else => unreachable,
+ else => return self.fail("TODO: binOpRegister {s}", .{@tagName(tag)}),
};
_ = try self.addInst(.{
@@ -1052,6 +987,8 @@ fn binOpRegister(
/// looks at the lhs and rhs and determines which kind of lowering
/// would be best suitable and then delegates the lowering to other
/// functions.
+///
+/// If `maybe_inst` is provided, it must be a bin_op instruction.
fn binOp(
self: *Self,
tag: Air.Inst.Tag,
@@ -1066,6 +1003,12 @@ fn binOp(
// Arithmetic operations on integers and floats
.add,
.sub,
+ .cmp_eq,
+ .cmp_neq,
+ .cmp_gt,
+ .cmp_gte,
+ .cmp_lt,
+ .cmp_lte,
=> {
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO binary operations on floats", .{}),
@@ -1180,8 +1123,19 @@ fn airMulSat(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAddWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
- return self.fail("TODO implement airAddWithOverflow for {}", .{self.target.cpu.arch});
+ const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
+
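+ // Note: only the wrapping add is lowered here for now; the overflow bit
+ // of the result tuple is not computed yet.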
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const lhs = try self.resolveInst(extra.lhs);
+ const rhs = try self.resolveInst(extra.rhs);
+ const lhs_ty = self.typeOf(extra.lhs);
+ const rhs_ty = self.typeOf(extra.rhs);
+
+ break :result try self.binOp(.add, null, lhs, rhs, lhs_ty, rhs_ty);
+ };
+
+ return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none });
}
fn airSubWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
@@ -1352,13 +1306,30 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_ptr for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
+ const mcv = try self.resolveInst(ty_op.operand);
+ break :result try self.slicePtr(mcv);
+ };
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
+fn slicePtr(self: *Self, mcv: MCValue) !MCValue {
+ switch (mcv) {
+ .dead, .unreach, .none => unreachable,
+ .register => unreachable, // a slice doesn't fit in one register
+ .stack_offset => |off| {
+ return MCValue{ .stack_offset = off };
+ },
+ .memory => |addr| {
+ return MCValue{ .memory = addr };
+ },
+ else => return self.fail("TODO slicePtr {s}", .{@tagName(mcv)}),
+ }
+}
+
fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement slice_len for {}", .{self.target.cpu.arch});
+ const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airSliceLen for {}", .{self.target.cpu.arch});
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
@@ -1500,6 +1471,7 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const mod = self.bin_file.comp.module.?;
const elem_ty = ptr_ty.childType(mod);
+
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -1507,9 +1479,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.dead => unreachable,
.immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }),
.ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }),
- .register => {
- return self.fail("TODO implement loading from MCValue.register", .{});
- },
+ .register => |src_reg| try self.setRegOrMem(elem_ty, dst_mcv, .{ .register = src_reg }),
.memory,
.stack_offset,
=> {
@@ -1520,6 +1490,10 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
try self.genSetReg(ptr_ty, reg, ptr);
try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
},
+ .load_symbol => {
+ const reg = try self.copyToTmpRegister(ptr_ty, ptr);
+ try self.load(dst_mcv, .{ .register = reg }, ptr_ty);
+ },
}
}
@@ -1553,6 +1527,8 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type) !void {
_ = ptr_ty;
+ log.debug("storing {s}", .{@tagName(ptr)});
+
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@@ -1573,6 +1549,9 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.stack_offset => {
return self.fail("TODO implement storing to MCValue.stack_offset", .{});
},
+ .load_symbol => {
+ return self.fail("TODO implement storing to MCValue.load_symbol", .{});
+ },
}
}
@@ -1596,27 +1575,32 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- return self.structFieldPtr(extra.struct_operand, ty_pl.ty, extra.field_index);
+ const result = try self.structFieldPtr(inst, extra.struct_operand, ty_pl.ty, extra.field_index);
+ return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airStructFieldPtrIndex(self: *Self, inst: Air.Inst.Index, index: u8) !void {
const ty_op = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_op;
- return self.structFieldPtr(ty_op.operand, ty_op.ty, index);
+ const result = try self.structFieldPtr(inst, ty_op.operand, ty_op.ty, index);
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
-fn structFieldPtr(self: *Self, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !void {
+
+fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Air.Inst.Ref, index: u32) !MCValue {
+ _ = inst;
_ = operand;
_ = ty;
_ = index;
- return self.fail("TODO implement codegen struct_field_ptr", .{});
- //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
+
+ return self.fail("TODO: structFieldPtr", .{});
}
fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
- _ = extra;
- return self.fail("TODO implement codegen struct_field_val", .{});
- //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none });
+ _ = ty_pl;
+
+ return self.fail("TODO: airStructFieldVal", .{});
+
+ // return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none });
}
fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void {
@@ -1732,12 +1716,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
try self.register_manager.getReg(reg, null);
try self.genSetReg(arg_ty, reg, arg_mcv);
},
- .stack_offset => {
- return self.fail("TODO implement calling with parameters in memory", .{});
- },
+ .stack_offset => |off| try self.genSetStack(arg_ty, off, arg_mcv),
.ptr_stack_offset => {
return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{});
},
+ .load_symbol => {
+ return self.fail("TODO implement calling with MCValue.load_symbol", .{});
+ },
}
}
@@ -1747,7 +1732,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const sym_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, func.owner_decl);
const sym = elf_file.symbol(sym_index);
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
- const got_addr: u32 = @intCast(sym.zigGotAddress(elf_file));
+ const got_addr = sym.zigGotAddress(elf_file);
try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
@@ -1830,7 +1815,8 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
//return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
-fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
+fn airCmp(self: *Self, inst: Air.Inst.Index) !void {
+ const tag = self.air.instructions.items(.tag)[@intFromEnum(inst)];
const bin_op = self.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
@@ -1842,12 +1828,12 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
- _ = op;
- _ = lhs;
- _ = rhs;
+ const lhs_ty = self.typeOf(bin_op.lhs);
+ const rhs_ty = self.typeOf(bin_op.rhs);
- return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch});
- // return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+ const result = try self.binOp(tag, null, lhs, rhs, lhs_ty, rhs_ty);
+
+ return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
@@ -1878,13 +1864,11 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgInlineBlock(self: *Self, inst: Air.Inst.Index) !void {
- const mod = self.bin_file.comp.module.?;
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const extra = self.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
- const func = mod.funcInfo(extra.data.func);
- // TODO emit debug info for function change
- _ = func;
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
+ _ = extra;
+ // TODO: emit debug info for this block
+ return self.finishAir(inst, .dead, .{ .none, .none, .none });
}
fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
@@ -1897,10 +1881,165 @@ fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void {
}
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
- _ = inst;
+ const pl_op = self.air.instructions.items(.data)[@intFromEnum(inst)].pl_op;
+ const cond = try self.resolveInst(pl_op.operand);
+ const cond_ty = self.typeOf(pl_op.operand);
+ const extra = self.air.extraData(Air.CondBr, pl_op.payload);
+ const then_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.then_body_len]);
+ const else_body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]);
+ const liveness_condbr = self.liveness.getCondBr(inst);
+
+ // A branch to the else ("false") section, lowered as beq against zero.
+ const reloc = try self.condBr(cond_ty, cond);
+
+ // If the condition dies here in this condbr instruction, process
+ // that death now instead of later as this has an effect on
+ // whether it needs to be spilled in the branches
+ if (self.liveness.operandDies(inst, 0)) {
+ if (pl_op.operand.toIndex()) |op_index| {
+ self.processDeath(op_index);
+ }
+ }
+
+ // Save state
+ const parent_next_stack_offset = self.next_stack_offset;
+ const parent_free_registers = self.register_manager.free_registers;
+ var parent_stack = try self.stack.clone(self.gpa);
+ defer parent_stack.deinit(self.gpa);
+ const parent_registers = self.register_manager.registers;
+
+ try self.branch_stack.append(.{});
+ errdefer {
+ _ = self.branch_stack.pop();
+ }
+
+ try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len);
+ for (liveness_condbr.then_deaths) |operand| {
+ self.processDeath(operand);
+ }
+ try self.genBody(then_body);
+
+ // Revert to the previous register and stack allocation state.
+
+ var saved_then_branch = self.branch_stack.pop();
+ defer saved_then_branch.deinit(self.gpa);
+
+ self.register_manager.registers = parent_registers;
+
+ self.stack.deinit(self.gpa);
+ self.stack = parent_stack;
+ parent_stack = .{};
+
+ self.next_stack_offset = parent_next_stack_offset;
+ self.register_manager.free_registers = parent_free_registers;
+
+ try self.performReloc(reloc);
+ const else_branch = self.branch_stack.addOneAssumeCapacity();
+ else_branch.* = .{};
+
+ try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len);
+ for (liveness_condbr.else_deaths) |operand| {
+ self.processDeath(operand);
+ }
+ try self.genBody(else_body);
+
+ // At this point, each branch will possibly have conflicting values for where
+ // each instruction is stored. They agree, however, on which instructions are alive/dead.
+ // We use the first ("then") branch as canonical, and here emit
+ // instructions into the second ("else") branch to make it conform.
+ // We continue to respect the data structure semantic guarantees of the else_branch so
+ // that we can use all the code emitting abstractions. This is why at the bottom we
+ // assert that parent_branch.free_registers equals the saved_then_branch.free_registers
+ // rather than assigning it.
+ const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2];
+ try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count());
+ const else_slice = else_branch.inst_table.entries.slice();
+ const else_keys = else_slice.items(.key);
+ const else_values = else_slice.items(.value);
+ for (else_keys, 0..) |else_key, else_idx| {
+ const else_value = else_values[else_idx];
+ const canon_mcv = if (saved_then_branch.inst_table.fetchSwapRemove(else_key)) |then_entry| blk: {
+ // The instruction's MCValue is overridden in both branches.
+ log.debug("condBr put branch table (key = %{d}, value = {})", .{ else_key, then_entry.value });
+ parent_branch.inst_table.putAssumeCapacity(else_key, then_entry.value);
+ if (else_value == .dead) {
+ assert(then_entry.value == .dead);
+ continue;
+ }
+ break :blk then_entry.value;
+ } else blk: {
+ if (else_value == .dead)
+ continue;
+ // The instruction is only overridden in the else branch.
+ var i: usize = self.branch_stack.items.len - 2;
+ while (true) {
+ i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead?
+ if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| {
+ assert(mcv != .dead);
+ break :blk mcv;
+ }
+ }
+ };
+ log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv });
+ // TODO make sure the destination stack offset / register does not already have something
+ // going on there.
+ try self.setRegOrMem(self.typeOfIndex(else_key), canon_mcv, else_value);
+ // TODO track the new register / stack allocation
+ }
+ try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count());
+ const then_slice = saved_then_branch.inst_table.entries.slice();
+ const then_keys = then_slice.items(.key);
+ const then_values = then_slice.items(.value);
+ for (then_keys, 0..) |then_key, then_idx| {
+ const then_value = then_values[then_idx];
+ // We already deleted the items from this table that matched the else_branch.
+ // So these are all instructions that are only overridden in the then branch.
+ parent_branch.inst_table.putAssumeCapacity(then_key, then_value);
+ if (then_value == .dead)
+ continue;
+ const parent_mcv = blk: {
+ var i: usize = self.branch_stack.items.len - 2;
+ while (true) {
+ i -= 1;
+ if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| {
+ assert(mcv != .dead);
+ break :blk mcv;
+ }
+ }
+ };
+ log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value });
+ // TODO make sure the destination stack offset / register does not already have something
+ // going on there.
+ try self.setRegOrMem(self.typeOfIndex(then_key), parent_mcv, then_value);
+ // TODO track the new register / stack allocation
+ }
- return self.fail("TODO implement condbr {}", .{self.target.cpu.arch});
- // return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none });
+ {
+ var item = self.branch_stack.pop();
+ item.deinit(self.gpa);
+ }
+
+ return self.finishAir(inst, .unreach, .{ .none, .none, .none });
+}
+
+fn condBr(self: *Self, cond_ty: Type, condition: MCValue) !Mir.Inst.Index {
+ _ = cond_ty;
+
+ const reg = switch (condition) {
+ .register => |r| r,
+ else => try self.copyToTmpRegister(Type.bool, condition),
+ };
+
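+ // beq against the zero register: the branch is taken when the condition
+ // value is 0 (false), so control falls through into the then body and
+ // jumps to the else body otherwise.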
+ return try self.addInst(.{
+ .tag = .beq,
+ .data = .{
+ .b_type = .{
+ .rs1 = reg,
+ .rs2 = .zero,
+ .imm12 = 0, // patched later.
+ },
+ },
+ });
}
fn isNull(self: *Self, operand: MCValue) !MCValue {
@@ -2044,25 +2183,26 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body: []const Air.Inst.Index = @ptrCast(self.air.extra[loop.end..][0..loop.data.body_len]);
- const start_index = self.code.items.len;
+
+ const start_index: Mir.Inst.Index = @intCast(self.code.items.len);
+
try self.genBody(body);
try self.jump(start_index);
+
return self.finishAirBookkeeping();
}
/// Send control flow to the `index` of `self.code`.
-fn jump(self: *Self, index: usize) !void {
- _ = index;
- return self.fail("TODO implement jump for {}", .{self.target.cpu.arch});
+fn jump(self: *Self, index: Mir.Inst.Index) !void {
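+ // The jump target is recorded as an MIR instruction index; turning it
+ // into a real byte offset is left to emission (see the lowerMir stub in
+ // Emit.zig).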
+ _ = try self.addInst(.{
+ .tag = .pseudo_jump,
+ .data = .{
+ .inst = index,
+ },
+ });
}
fn airBlock(self: *Self, inst: Air.Inst.Index) !void {
- const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
- const extra = self.air.extraData(Air.Block, ty_pl.payload);
- try self.lowerBlock(inst, @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]));
-}
-
-fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !void {
try self.blocks.putNoClobber(self.gpa, inst, .{
// A block is a setup to be able to jump to the end.
.relocs = .{},
@@ -2074,10 +2214,16 @@ fn lowerBlock(self: *Self, inst: Air.Inst.Index, body: []const Air.Inst.Index) !
.mcv = MCValue{ .none = {} },
});
defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa);
+
+ const ty_pl = self.air.instructions.items(.data)[@intFromEnum(inst)].ty_pl;
+ const extra = self.air.extraData(Air.Block, ty_pl.payload);
+ const body: []const Air.Inst.Index = @ptrCast(self.air.extra[extra.end..][0..extra.data.body_len]);
// TODO emit debug info lexical block
try self.genBody(body);
- for (self.blocks.getPtr(inst).?.relocs.items) |reloc| try self.performReloc(reloc);
+ for (self.blocks.getPtr(inst).?.relocs.items) |reloc| {
+ try self.performReloc(reloc);
+ }
const result = self.blocks.getPtr(inst).?.mcv;
return self.finishAir(inst, result, .{ .none, .none, .none });
@@ -2091,11 +2237,12 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
// return self.finishAir(inst, .dead, .{ condition, .none, .none });
}
-fn performReloc(self: *Self, reloc: Reloc) !void {
- _ = self;
- switch (reloc) {
- .rel32 => unreachable,
- .arm_branch => unreachable,
+fn performReloc(self: *Self, inst: Mir.Inst.Index) !void {
+ const tag = self.mir_instructions.items(.tag)[inst];
+
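+ // The relocation target is stored as an instruction index for now;
+ // computing real byte offsets is deferred to emission (see the
+ // "compute branch offsets" TODO in Emit.zig).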
+ switch (tag) {
+ .beq => self.mir_instructions.items(.data)[inst].b_type.imm12 = @intCast(inst),
+ else => return self.fail("TODO: performReloc {s}", .{@tagName(tag)}),
}
}
@@ -2135,7 +2282,15 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
- return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch});
+ block_data.relocs.appendAssumeCapacity(try self.addInst(.{
+ .tag = .jal,
+ .data = .{
+ .j_type = .{
+ .rd = .ra,
+ .imm21 = undefined, // populated later through performReloc
+ },
+ },
+ }));
}
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
@@ -2261,28 +2416,138 @@ fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigT
/// Sets the value without any modifications to register allocation metadata or stack allocation metadata.
fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void {
+ if (!loc.isMutable()) {
+ return std.debug.panic("tried to setRegOrMem into immutable MCValue: {s}", .{@tagName(loc)});
+ }
+
switch (loc) {
.none => return,
.register => |reg| return self.genSetReg(ty, reg, val),
.stack_offset => |off| return self.genSetStack(ty, off, val),
- .memory => {
- return self.fail("TODO implement setRegOrMem for memory", .{});
- },
- else => unreachable,
+ else => return self.fail("TODO: setRegOrMem {s}", .{@tagName(loc)}),
}
}
fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void {
- _ = ty;
- _ = stack_offset;
- _ = mcv;
- return self.fail("TODO implement getSetStack for {}", .{self.target.cpu.arch});
+ const mod = self.bin_file.comp.module.?;
+ const abi_size: u32 = @intCast(ty.abiSize(mod));
+
+ switch (mcv) {
+ .none => return,
+ .dead => unreachable,
+ .immediate => {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStack(ty, stack_offset, .{ .register = reg });
+ },
+ .register => |reg| {
+ switch (abi_size) {
+ 1, 2, 4, 8 => {
+ assert(std.mem.isAlignedGeneric(u32, stack_offset, abi_size));
+
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .sb,
+ 2 => .sh,
+ 4 => .sw,
+ 8 => .sd,
+ else => unreachable,
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .i_type = .{
+ .rd = reg,
+ .rs1 = .sp,
+ .imm12 = @intCast(stack_offset),
+ } },
+ });
+ },
+ else => return self.fail("TODO: genSetStack for size={d}", .{abi_size}),
+ }
+ },
+ .stack_offset, .load_symbol => {
+ if (abi_size <= 8) {
+ const reg = try self.copyToTmpRegister(ty, mcv);
+ return self.genSetStack(ty, stack_offset, MCValue{ .register = reg });
+ }
+
+ const ptr_ty = try mod.singleMutPtrType(ty);
+
+ // TODO call extern memcpy
+ const regs = try self.register_manager.allocRegs(5, .{ null, null, null, null, null }, gp);
+ const regs_locks = self.register_manager.lockRegsAssumeUnused(5, regs);
+ defer for (regs_locks) |reg| {
+ self.register_manager.unlockReg(reg);
+ };
+
+ const src_reg = regs[0];
+ const dst_reg = regs[1];
+ const len_reg = regs[2];
+ const count_reg = regs[3];
+ const tmp_reg = regs[4];
+
+ switch (mcv) {
+ .stack_offset => |offset| {
+ if (offset == stack_offset) return;
+ try self.genSetReg(ptr_ty, src_reg, .{ .ptr_stack_offset = offset });
+ },
+ .load_symbol => |sym_off| {
+ const atom_index = atom: {
+ const decl_index = mod.funcOwnerDeclIndex(self.func_index);
+
+ if (self.bin_file.cast(link.File.Elf)) |elf_file| {
+ const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
+ break :atom atom_index;
+ } else return self.fail("TODO genSetStack for {s}", .{@tagName(self.bin_file.tag)});
+ };
+
+ _ = try self.addInst(.{
+ .tag = .load_symbol,
+ .data = .{
+ .payload = try self.addExtra(Mir.LoadSymbolPayload{
+ .register = @intFromEnum(src_reg),
+ .atom_index = atom_index,
+ .sym_index = sym_off.sym,
+ }),
+ },
+ });
+ },
+ else => return self.fail("TODO: genSetStack unreachable {s}", .{@tagName(mcv)}),
+ }
+
+ try self.genSetReg(ptr_ty, dst_reg, .{ .ptr_stack_offset = stack_offset });
+ try self.genSetReg(Type.usize, len_reg, .{ .immediate = abi_size });
+
+ // memcpy(src, dst, len)
+ try self.genInlineMemcpy(src_reg, dst_reg, len_reg, count_reg, tmp_reg);
+ },
+ else => return self.fail("TODO: genSetStack {s}", .{@tagName(mcv)}),
+ }
+}
+
+fn genInlineMemcpy(
+ self: *Self,
+ src: Register,
+ dst: Register,
+ len: Register,
+ count: Register,
+ tmp: Register,
+) !void {
+ _ = src;
+ _ = dst;
+ _ = len;
+ _ = count;
+ _ = tmp;
+
+ return self.fail("TODO: genInlineMemcpy", .{});
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
+ const mod = self.bin_file.comp.module.?;
+ const abi_size: u32 = @intCast(ty.abiSize(mod));
+
switch (mcv) {
.dead => unreachable,
- .ptr_stack_offset => unreachable,
+ .ptr_stack_offset => return self.fail("TODO genSetReg ptr_stack_offset", .{}),
.unreach, .none => return, // Nothing to do.
.undef => {
if (!self.wantSafety())
@@ -2343,8 +2608,6 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
});
},
.memory => |addr| {
- // The value is in memory at a hard-coded address.
- // If the type is a pointer, it means the pointer address is at this memory location.
try self.genSetReg(ty, reg, .{ .immediate = addr });
_ = try self.addInst(.{
@@ -2355,11 +2618,51 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.imm12 = 0,
} },
});
- // LOAD imm=[i12 offset = 0], rs1 =
- // return self.fail("TODO implement genSetReg memory for riscv64");
+ // LOAD imm=[i12 offset = 0], rs1
+ },
+ .stack_offset => |off| {
+ const tag: Mir.Inst.Tag = switch (abi_size) {
+ 1 => .lb,
+ 2 => .lh,
+ 4 => .lw,
+ 8 => .ld,
+ else => return self.fail("TODO: genSetReg for size {d}", .{abi_size}),
+ };
+
+ _ = try self.addInst(.{
+ .tag = tag,
+ .data = .{ .i_type = .{
+ .rd = reg,
+ .rs1 = .sp,
+ .imm12 = @intCast(off),
+ } },
+ });
+ },
+ .load_symbol => |sym_off| {
+ assert(sym_off.off == 0);
+
+ const decl_index = mod.funcOwnerDeclIndex(self.func_index);
+
+ const atom_index = switch (self.bin_file.tag) {
+ .elf => blk: {
+ const elf_file = self.bin_file.cast(link.File.Elf).?;
+ const atom_index = try elf_file.zigObjectPtr().?.getOrCreateMetadataForDecl(elf_file, decl_index);
+ break :blk atom_index;
+ },
+ else => return self.fail("TODO genSetReg load_symbol for {s}", .{@tagName(self.bin_file.tag)}),
+ };
+ _ = try self.addInst(.{
+ .tag = .load_symbol,
+ .data = .{
+ .payload = try self.addExtra(Mir.LoadSymbolPayload{
+ .register = @intFromEnum(reg),
+ .atom_index = atom_index,
+ .sym_index = sym_off.sym,
+ }),
+ },
+ });
},
- else => return self.fail("TODO implement getSetReg for riscv64 {}", .{mcv}),
}
}
@@ -2579,9 +2882,12 @@ fn genTypedValue(self: *Self, val: Value) InnerError!MCValue {
.mcv => |mcv| switch (mcv) {
.none => .none,
.undef => .undef,
- .load_got, .load_symbol, .load_direct, .load_tlv => unreachable, // TODO
+ .load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } },
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
+ .load_got, .load_direct, .load_tlv => {
+ return self.fail("TODO: genTypedValue {s}", .{@tagName(mcv)});
+ },
},
.fail => |msg| {
self.err_msg = msg;
@@ -2634,41 +2940,17 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// TODO make this generic with other ABIs, in particular
// with different hardware floating-point calling
// conventions
- var next_register: usize = 0;
- var next_stack_offset: u32 = 0;
- // TODO: this is never assigned, which is a bug, but I don't know how this code works
- // well enough to try and fix it. I *think* `next_register += next_stack_offset` is
- // supposed to be `next_stack_offset += param_size` in every case where it appears.
- _ = &next_stack_offset;
-
- const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
+ var stack_offset: u32 = 0;
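+ // For now every parameter is passed on the stack. The standard riscv64
+ // C calling convention would pass the first eight integer arguments in
+ // a0-a7, as the removed code above attempted.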
for (fn_info.param_types.get(ip), result.args) |ty, *result_arg| {
- const param_size: u32 = @intCast(Type.fromInterned(ty).abiSize(mod));
- if (param_size <= 8) {
- if (next_register < argument_registers.len) {
- result_arg.* = .{ .register = argument_registers[next_register] };
- next_register += 1;
- } else {
- result_arg.* = .{ .stack_offset = next_stack_offset };
- next_register += next_stack_offset;
- }
- } else if (param_size <= 16) {
- if (next_register < argument_registers.len - 1) {
- return self.fail("TODO MCValues with 2 registers", .{});
- } else if (next_register < argument_registers.len) {
- return self.fail("TODO MCValues split register + stack", .{});
- } else {
- result_arg.* = .{ .stack_offset = next_stack_offset };
- next_register += next_stack_offset;
- }
- } else {
- result_arg.* = .{ .stack_offset = next_stack_offset };
- next_register += next_stack_offset;
- }
+ const param_type = Type.fromInterned(ty);
+ const param_size: u32 = @intCast(param_type.abiSize(mod));
+
+ result_arg.* = .{ .stack_offset = stack_offset };
+ stack_offset += param_size;
}
- result.stack_byte_count = next_stack_offset;
+ result.stack_byte_count = stack_offset;
result.stack_align = .@"16";
},
else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}),
src/arch/riscv64/Emit.zig
@@ -27,6 +27,8 @@ prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
+const log = std.log.scoped(.emit);
+
const InnerError = error{
OutOfMemory,
EmitFail,
@@ -37,33 +39,57 @@ pub fn emitMir(
) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
+ // TODO: compute branch offsets
+ // try emit.lowerMir();
+
// Emit machine code
for (mir_tags, 0..) |tag, index| {
const inst = @as(u32, @intCast(index));
+ log.debug("emitMir: {s}", .{@tagName(tag)});
switch (tag) {
.add => try emit.mirRType(inst),
.sub => try emit.mirRType(inst),
+ .cmp_eq => try emit.mirRType(inst),
+ .cmp_gt => try emit.mirRType(inst),
+
+ .beq => try emit.mirBType(inst),
+ .bne => try emit.mirBType(inst),
+
.addi => try emit.mirIType(inst),
.jalr => try emit.mirIType(inst),
- .ld => try emit.mirIType(inst),
- .sd => try emit.mirIType(inst),
+
+ .jal => try emit.mirJType(inst),
.ebreak => try emit.mirSystem(inst),
.ecall => try emit.mirSystem(inst),
.unimp => try emit.mirSystem(inst),
.dbg_line => try emit.mirDbgLine(inst),
-
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
+ .pseudo_prologue => try emit.mirPseudo(inst),
+ .pseudo_jump => try emit.mirPseudo(inst),
+
.mv => try emit.mirRR(inst),
.nop => try emit.mirNop(inst),
.ret => try emit.mirNop(inst),
.lui => try emit.mirUType(inst),
+
+ .ld => try emit.mirIType(inst),
+ .sd => try emit.mirIType(inst),
+ .lw => try emit.mirIType(inst),
+ .sw => try emit.mirIType(inst),
+ .lh => try emit.mirIType(inst),
+ .sh => try emit.mirIType(inst),
+ .lb => try emit.mirIType(inst),
+ .sb => try emit.mirIType(inst),
+ .ldr_ptr_stack => try emit.mirIType(inst),
+
+ .load_symbol => try emit.mirLoadSymbol(inst),
}
}
}
@@ -86,15 +112,19 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
return error.EmitFail;
}
-fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
- const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
- const delta_pc: usize = self.code.items.len - self.prev_di_pc;
- switch (self.debug_output) {
+fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
+ log.debug("Line: {} {}\n", .{ line, emit.prev_di_line });
+ const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
+ const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
+ log.debug("(advance pc={d} and line={d})", .{ delta_pc, delta_line });
+ switch (emit.debug_output) {
.dwarf => |dw| {
+ if (column != emit.prev_di_column) try dw.setColumn(column);
+ if (delta_line == 0) return; // TODO: remove this
try dw.advancePCAndLine(delta_line, delta_pc);
- self.prev_di_line = line;
- self.prev_di_column = column;
- self.prev_di_pc = self.code.items.len;
+ emit.prev_di_line = line;
+ emit.prev_di_column = column;
+ emit.prev_di_pc = emit.code.items.len;
},
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
@@ -113,12 +143,12 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;
if (dbg_out.start_line == null)
- dbg_out.start_line = self.prev_di_line;
+ dbg_out.start_line = emit.prev_di_line;
dbg_out.end_line = line;
// only do this if the pc changed
- self.prev_di_line = line;
- self.prev_di_column = column;
- self.prev_di_pc = self.code.items.len;
+ emit.prev_di_line = line;
+ emit.prev_di_column = column;
+ emit.prev_di_pc = emit.code.items.len;
},
.none => {},
}
@@ -131,6 +161,19 @@ fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.add => try emit.writeInstruction(Instruction.add(r_type.rd, r_type.rs1, r_type.rs2)),
.sub => try emit.writeInstruction(Instruction.sub(r_type.rd, r_type.rs1, r_type.rs2)),
+ .cmp_eq => try emit.writeInstruction(Instruction.slt(r_type.rd, r_type.rs1, r_type.rs2)),
+ else => unreachable,
+ }
+}
+
+fn mirBType(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const b_type = emit.mir.instructions.items(.data)[inst].b_type;
+
+ // const inst = b_type.imm12;
+
+ switch (tag) {
+ .beq => try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, b_type.imm12)),
else => unreachable,
}
}
@@ -142,8 +185,30 @@ fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
switch (tag) {
.addi => try emit.writeInstruction(Instruction.addi(i_type.rd, i_type.rs1, i_type.imm12)),
.jalr => try emit.writeInstruction(Instruction.jalr(i_type.rd, i_type.imm12, i_type.rs1)),
+
.ld => try emit.writeInstruction(Instruction.ld(i_type.rd, i_type.imm12, i_type.rs1)),
.sd => try emit.writeInstruction(Instruction.sd(i_type.rd, i_type.imm12, i_type.rs1)),
+ .lw => try emit.writeInstruction(Instruction.lw(i_type.rd, i_type.imm12, i_type.rs1)),
+ .sw => try emit.writeInstruction(Instruction.sw(i_type.rd, i_type.imm12, i_type.rs1)),
+ .lh => try emit.writeInstruction(Instruction.lh(i_type.rd, i_type.imm12, i_type.rs1)),
+ .sh => try emit.writeInstruction(Instruction.sh(i_type.rd, i_type.imm12, i_type.rs1)),
+ .lb => try emit.writeInstruction(Instruction.lb(i_type.rd, i_type.imm12, i_type.rs1)),
+ .sb => try emit.writeInstruction(Instruction.sb(i_type.rd, i_type.imm12, i_type.rs1)),
+
+ .ldr_ptr_stack => try emit.writeInstruction(Instruction.add(i_type.rd, i_type.rs1, .sp)),
+
+ else => unreachable,
+ }
+}
+
+fn mirJType(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const j_type = emit.mir.instructions.items(.data)[inst].j_type;
+
+ switch (tag) {
+ .jal => {
+ try emit.writeInstruction(Instruction.jal(j_type.rd, j_type.imm21));
+ },
else => unreachable,
}
}
@@ -169,28 +234,55 @@ fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
}
}
-fn mirDebugPrologueEnd(self: *Emit) !void {
- switch (self.debug_output) {
+fn mirDebugPrologueEnd(emit: *Emit) !void {
+ switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
- try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
+ try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
-fn mirDebugEpilogueBegin(self: *Emit) !void {
- switch (self.debug_output) {
+fn mirDebugEpilogueBegin(emit: *Emit) !void {
+ switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
- try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column);
+ try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
+fn mirPseudo(emit: *Emit, inst: Mir.Inst.Index) !void {
+ const tag = emit.mir.instructions.items(.tag)[inst];
+ const data = emit.mir.instructions.items(.data)[inst];
+
+ switch (tag) {
+ .pseudo_prologue => {
+ const imm12 = data.imm12;
+ const stack_size: i12 = @max(32, imm12);
+
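+ // Standard riscv64 frame setup: grow the stack, spill the return
+ // address and the caller's frame pointer at the top of the new frame,
+ // then point s0 (the frame pointer) at the old stack pointer.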
+ try emit.writeInstruction(Instruction.addi(.sp, .sp, -stack_size));
+ try emit.writeInstruction(Instruction.sd(.ra, stack_size - 8, .sp));
+ try emit.writeInstruction(Instruction.sd(.s0, stack_size - 16, .sp));
+ try emit.writeInstruction(Instruction.addi(.s0, .sp, stack_size));
+ },
+
+ .pseudo_jump => {
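+ // Note: the recorded target is not used yet; the emitted offset is a
+ // placeholder until branch lowering (the lowerMir stub below) computes
+ // real offsets.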
+ const target = data.inst;
+ const offset: i12 = @intCast(emit.code.items.len);
+ _ = target;
+
+ try emit.writeInstruction(Instruction.jal(.s0, offset));
+ },
+
+ else => unreachable,
+ }
+}
+
fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr = emit.mir.instructions.items(.data)[inst].rr;
@@ -200,6 +292,7 @@ fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
}
}
+
fn mirUType(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const u_type = emit.mir.instructions.items(.data)[inst].u_type;
@@ -219,3 +312,63 @@ fn mirNop(emit: *Emit, inst: Mir.Inst.Index) !void {
else => unreachable,
}
}
+
+fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void {
+ // const tag = emit.mir.instructions.items(.tag)[inst];
+ const payload = emit.mir.instructions.items(.data)[inst].payload;
+ const data = emit.mir.extraData(Mir.LoadSymbolPayload, payload).data;
+ const reg = @as(Register, @enumFromInt(data.register));
+
+ const end_offset = @as(u32, @intCast(emit.code.items.len));
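+ // Emit a lui+lw pair against a zero address; the linker patches the
+ // upper 20 bits of the symbol address into the lui (R_RISCV_HI20) and
+ // the low 12 bits into the lw (R_RISCV_LO12_I) via the relocations
+ // added below.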
+ try emit.writeInstruction(Instruction.lui(reg, 0));
+ try emit.writeInstruction(Instruction.lw(reg, 0, reg));
+
+ switch (emit.bin_file.tag) {
+ .elf => {
+ const elf_file = emit.bin_file.cast(link.File.Elf).?;
+ const atom_ptr = elf_file.symbol(data.atom_index).atom(elf_file).?;
+
+ const hi_r_type = @intFromEnum(std.elf.R_RISCV.HI20);
+
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = end_offset,
+ .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | hi_r_type,
+ .r_addend = 0,
+ });
+
+ const lo_r_type = @intFromEnum(std.elf.R_RISCV.LO12_I);
+
+ try atom_ptr.addReloc(elf_file, .{
+ .r_offset = end_offset + 4,
+ .r_info = (@as(u64, @intCast(data.sym_index)) << 32) | lo_r_type,
+ .r_addend = 0,
+ });
+ },
+ else => unreachable,
+ }
+}
+
+fn isBranch(tag: Mir.Inst.Tag) bool {
+ return switch (tag) {
+ .pseudo_jump => true,
+ else => false,
+ };
+}
+
+fn lowerMir(emit: *Emit) !void {
+ const comp = emit.bin_file.comp;
+ const gpa = comp.gpa;
+ const mir_tags = emit.mir.instructions.items(.tag);
+
+ _ = gpa;
+
+ for (mir_tags, 0..) |tag, index| {
+ const inst: u32 = @intCast(index);
+
+ if (isBranch(tag)) {
+ const target_inst = emit.mir.instructions.items(.data)[inst].inst;
+
+ _ = target_inst;
+ }
+ }
+}
src/arch/riscv64/Mir.zig
@@ -24,25 +24,72 @@ pub const Inst = struct {
data: Data,
pub const Tag = enum(u16) {
- add,
addi,
- /// Pseudo-instruction: End of prologue
- dbg_prologue_end,
- /// Pseudo-instruction: Beginning of epilogue
- dbg_epilogue_begin,
- /// Pseudo-instruction: Update debug line
- dbg_line,
- unimp,
- ebreak,
- ecall,
jalr,
- ld,
lui,
mv,
+
+ unimp,
+ ebreak,
+ ecall,
+
+ /// Addition
+ add,
+ /// Subtraction
+ sub,
+
+ jal,
+
+ // TODO: Maybe create a special data for compares that includes the ops
+ /// Compare equal, uses r_type
+ cmp_eq,
+ /// Compare greater than, uses r_type
+ cmp_gt,
+
+ /// Branch if equal. Uses b_type.
+ beq,
+ /// Branch if not equal. Uses b_type.
+ bne,
+
nop,
ret,
+
+ /// Load double (64 bits)
+ ld,
+ /// Store double (64 bits)
sd,
- sub,
+ /// Load word (32 bits)
+ lw,
+ /// Store word (32 bits)
+ sw,
+ /// Load half (16 bits)
+ lh,
+ /// Store half (16 bits)
+ sh,
+ /// Load byte (8 bits)
+ lb,
+ /// Store byte (8 bits)
+ sb,
+
+ /// Pseudo-instruction: End of prologue
+ dbg_prologue_end,
+ /// Pseudo-instruction: Beginning of epilogue
+ dbg_epilogue_begin,
+ /// Pseudo-instruction: Update debug line
+ dbg_line,
+
+ /// Pseudo-instruction that will generate a backpatched
+ /// function prologue.
+ pseudo_prologue,
+ /// Jumps. Uses the `inst` payload.
+ pseudo_jump,
+
+ /// Loads the value at a symbol's address. Uses the `payload` field,
+ /// which points at a `LoadSymbolPayload` in `extra`.
+ load_symbol,
+
+ // TODO: add description
+ // this is bad, remove this
+ ldr_ptr_stack,
};
/// The position of an MIR instruction within the `Mir` instructions array.
@@ -63,7 +110,11 @@ pub const Inst = struct {
/// A 16-bit immediate value.
///
/// Used by e.g. svc
- imm16: u16,
+ imm16: i16,
+ /// A 12-bit immediate value.
+ ///
+ /// Used by e.g. pseudo_prologue
+ imm12: i12,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
@@ -95,6 +146,21 @@ pub const Inst = struct {
rs1: Register,
rs2: Register,
},
+ /// B-Type
+ ///
+ /// Used by e.g. beq
+ b_type: struct {
+ rs1: Register,
+ rs2: Register,
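+ /// Branch offset in bytes. B-type encodes a 13-bit signed immediate
+ /// whose lowest bit is implicitly zero, hence the i13 behind the
+ /// conventional imm12 name.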
+ imm12: i13,
+ },
+ /// J-Type
+ ///
+ /// Used by e.g. jal
+ j_type: struct {
+ rd: Register,
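+ /// Jump offset in bytes. J-type encodes a 21-bit signed immediate
+ /// whose lowest bit is implicitly zero.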
+ imm21: i21,
+ },
/// U-Type
///
/// Used by e.g. lui
@@ -111,10 +177,19 @@ pub const Inst = struct {
},
};
+ const CompareOp = enum {
+ eq,
+ neq,
+ gt,
+ gte,
+ lt,
+ lte,
+ };
+
// Make sure we don't accidentally make instructions bigger than expected.
- // Note that in safety builds, Zig is allowed to insert a secret field for safety checks.
+ // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks.
// comptime {
- // if (!std.debug.runtime_safety) {
+ // if (builtin.mode != .Debug) {
// assert(@sizeOf(Inst) == 8);
// }
// }
@@ -145,3 +220,9 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end
.end = i,
};
}
+
+pub const LoadSymbolPayload = struct {
+ register: u32,
+ atom_index: u32,
+ sym_index: u32,
+};