Commit 6e882d730b
Changed files (4) — only the diff for src/arch/x86_64/CodeGen.zig is shown below
src
arch
src/arch/x86_64/CodeGen.zig
@@ -374,71 +374,99 @@ pub fn generate(
/// Appends one MIR instruction to `mir_instructions` and returns its index.
/// Capacity is reserved up front so the `appendAssumeCapacity` below cannot fail.
fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index {
const gpa = self.gpa;
try self.mir_instructions.ensureUnusedCapacity(gpa, 1);
// Fix in this commit: cast to Mir.Inst.Index — the declared return type —
// instead of the unrelated Air.Inst.Index.
- const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len);
+ const result_index = @intCast(Mir.Inst.Index, self.mir_instructions.len);
self.mir_instructions.appendAssumeCapacity(inst);
return result_index;
}
// Visibility narrowed from `pub` to file-private in this commit.
-pub fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
/// Reserves room in `mir_extra` for every field of `extra`, then encodes it via
/// `addExtraAssumeCapacity`. Returns the index of the first encoded word.
+fn addExtra(self: *Self, extra: anytype) Allocator.Error!u32 {
const fields = std.meta.fields(@TypeOf(extra));
try self.mir_extra.ensureUnusedCapacity(self.gpa, fields.len);
return self.addExtraAssumeCapacity(extra);
}
// NOTE(review): `extraData` (the decoder counterpart of addExtra) is deleted
// outright by this commit with no replacement visible in this diff — confirm no
// remaining caller in this file still decodes `mir_extra` through it.
-fn extraData(self: *Self, comptime T: type, index: u32) struct { data: T, end: u32 } {
- const fields = std.meta.fields(T);
- var i: u32 = index;
- var result: T = undefined;
- inline for (fields) |field| {
- @field(result, field.name) = switch (field.type) {
- u32 => self.mir_extra.items[i],
- i32 => @bitCast(i32, self.mir_extra.items[i]),
- else => @compileError("bad field type"),
- };
- i += 1;
- }
- return .{
- .data = result,
- .end = i,
- };
-}
-
// Visibility narrowed from `pub` to file-private in this commit.
-pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
/// Encodes each field of `extra` into `mir_extra` — u32 fields directly, i32
/// fields via bitcast — and returns the index of the first encoded word.
/// Caller must have reserved capacity first (see `addExtra`).
+fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
const fields = std.meta.fields(@TypeOf(extra));
const result = @intCast(u32, self.mir_extra.items.len);
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
i32 => @bitCast(u32, @field(extra, field.name)),
// Improved diagnostic in this commit: names the offending field and type
// instead of the bare "bad field type" message.
- else => @compileError("bad field type"),
+ else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
});
}
return result;
}
/// New in this commit. Classifies up to four operands into a `Mir.Inst.Ops`
/// encoding, builds the matching `Mir.Inst.Data` payload, and appends the
/// instruction. Shapes handled: none, r, rr, ri_s/ri_u, imm_s/imm_u.
+fn assemble(self: *Self, tag: Mir.Inst.Tag, args: struct {
+ op1: Mir.Operand = .none,
+ op2: Mir.Operand = .none,
+ op3: Mir.Operand = .none,
+ op4: Mir.Operand = .none,
+}) !void {
// Operand-shape classification. Order matters: two-operand forms are tested
// before the one-operand fallbacks.
+ const ops: Mir.Inst.Ops = blk: {
+ if (args.op1 == .none and args.op2 == .none and args.op3 == .none and args.op4 == .none)
+ break :blk .none;
+
+ if (args.op1 == .reg and args.op2 == .reg)
+ break :blk .rr;
+ if (args.op1 == .reg and args.op2 == .imm) switch (args.op2.imm) {
+ .signed => break :blk .ri_s,
+ .unsigned => break :blk .ri_u,
+ };
+ if (args.op1 == .reg)
+ break :blk .r;
+ if (args.op1 == .imm) switch (args.op1.imm) {
+ .signed => break :blk .imm_s,
+ .unsigned => break :blk .imm_u, // TODO 64bits
+ };
+
// NOTE(review): op3/op4 are only consulted by the all-.none check above; a
// non-.none op3/op4 next to a recognized op1/op2 pair is silently dropped
// rather than rejected — confirm that is intended. Any other shape (e.g. a
// memory operand) lands on this unreachable.
+ unreachable;
+ };
// Payload construction mirrors the classification above.
+ const data: Mir.Inst.Data = switch (ops) {
+ .none => undefined,
+ .imm_s => .{ .imm_s = args.op1.imm.signed },
// NOTE(review): safety-checked truncation if the immediate exceeds
// maxInt(u32) — see the author's "TODO 64bits" above. Same below for ri_u.
+ .imm_u => .{ .imm_u = @intCast(u32, args.op1.imm.unsigned) },
+ .r => .{ .r = args.op1.reg },
+ .rr => .{ .rr = .{
+ .r1 = args.op1.reg,
+ .r2 = args.op2.reg,
+ } },
+ .ri_s => .{ .ri_s = .{
+ .r1 = args.op1.reg,
+ .imm = args.op2.imm.signed,
+ } },
+ .ri_u => .{ .ri_u = .{
+ .r1 = args.op1.reg,
+ .imm = @intCast(u32, args.op2.imm.unsigned),
+ } },
+ else => unreachable,
+ };
+ _ = try self.addInst(.{
+ .tag = tag,
+ .ops = ops,
+ .data = data,
+ });
+}
+
fn gen(self: *Self) InnerError!void {
const cc = self.fn_type.fnCallingConvention();
if (cc != .Naked) {
- _ = try self.addInst(.{
- .tag = .push,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
- .data = undefined, // unused for push reg,
+ try self.assemble(.push, .{
+ .op1 = .{ .reg = .rbp },
});
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .reg2 = .rsp,
- }),
- .data = undefined,
+ try self.assemble(.mov, .{
+ .op1 = .{ .reg = .rbp },
+ .op2 = .{ .reg = .rsp },
});
+
// We want to subtract the aligned stack frame size from rsp here, but we don't
// yet know how big it will be, so we leave room for a 4-byte stack size.
// TODO During semantic analysis, check if there are no function calls. If there
// are none, here we can omit the part where we subtract and then add rsp.
const backpatch_stack_sub = try self.addInst(.{
.tag = .nop,
- .ops = undefined,
+ .ops = .none,
.data = undefined,
});
@@ -465,7 +493,7 @@ fn gen(self: *Self) InnerError!void {
// Push callee-preserved regs that were used actually in use.
const backpatch_push_callee_preserved_regs = try self.addInst(.{
.tag = .nop,
- .ops = undefined,
+ .ops = .none,
.data = undefined,
});
@@ -496,7 +524,7 @@ fn gen(self: *Self) InnerError!void {
// Pop saved callee-preserved regs.
const backpatch_pop_callee_preserved_regs = try self.addInst(.{
.tag = .nop,
- .ops = undefined,
+ .ops = .none,
.data = undefined,
});
@@ -509,21 +537,12 @@ fn gen(self: *Self) InnerError!void {
// Maybe add rsp, x if required. This is backpatched later.
const backpatch_stack_add = try self.addInst(.{
.tag = .nop,
- .ops = undefined,
+ .ops = .none,
.data = undefined,
});
- _ = try self.addInst(.{
- .tag = .pop,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
- .data = undefined,
- });
-
- _ = try self.addInst(.{
- .tag = .ret,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
- .data = undefined,
- });
+ try self.assemble(.pop, .{ .op1 = .{ .reg = .rbp } });
+ try self.assemble(.ret, .{});
// Adjust the stack
if (self.max_end_stack > math.maxInt(i32)) {
@@ -537,27 +556,34 @@ fn gen(self: *Self) InnerError!void {
if (aligned_stack_end > 0) {
self.mir_instructions.set(backpatch_stack_sub, .{
.tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = aligned_stack_end },
+ .ops = .ri_u,
+ .data = .{ .ri_u = .{
+ .r1 = .rsp,
+ .imm = aligned_stack_end,
+ } },
});
self.mir_instructions.set(backpatch_stack_add, .{
.tag = .add,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = aligned_stack_end },
+ .ops = .ri_u,
+ .data = .{ .ri_u = .{
+ .r1 = .rsp,
+ .imm = aligned_stack_end,
+ } },
});
const save_reg_list = try self.addExtra(Mir.SaveRegisterList{
+ .base_reg = @enumToInt(Register.rbp),
.register_list = reg_list.asInt(),
.stack_end = aligned_stack_end,
});
self.mir_instructions.set(backpatch_push_callee_preserved_regs, .{
.tag = .push_regs,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
+ .ops = undefined,
.data = .{ .payload = save_reg_list },
});
self.mir_instructions.set(backpatch_pop_callee_preserved_regs, .{
.tag = .pop_regs,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rbp }),
+ .ops = undefined,
.data = .{ .payload = save_reg_list },
});
}
@@ -1306,14 +1332,15 @@ fn airMin(self: *Self, inst: Air.Inst.Index) !void {
.unsigned => .b,
.signed => .l,
};
- _ = try self.addInst(.{
- .tag = .cond_mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_mcv.register,
- .reg2 = lhs_reg,
- }),
- .data = .{ .cc = cc },
- });
+ _ = cc;
+ // _ = try self.addInst(.{
+ // .tag = .cond_mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_mcv.register,
+ // .reg2 = lhs_reg,
+ // }),
+ // .data = .{ .cc = cc },
+ // });
break :result dst_mcv;
};
@@ -1513,13 +1540,14 @@ fn genSetStackTruncatedOverflowCompare(
.signed => .o,
.unsigned => .c,
};
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = overflow_reg.to8(),
- }),
- .data = .{ .cc = cc },
- });
+ _ = cc;
+ // _ = try self.addInst(.{
+ // .tag = .cond_set_byte,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = overflow_reg.to8(),
+ // }),
+ // .data = .{ .cc = cc },
+ // });
const scratch_reg = temp_regs[1];
try self.genSetReg(extended_ty, scratch_reg, .{ .register = reg });
@@ -1532,11 +1560,11 @@ fn genSetStackTruncatedOverflowCompare(
);
const eq_reg = temp_regs[2];
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = eq_reg.to8() }),
- .data = .{ .cc = .ne },
- });
+ // _ = try self.addInst(.{
+ // .tag = .cond_set_byte,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = eq_reg.to8() }),
+ // .data = .{ .cc = .ne },
+ // });
try self.genBinOpMir(
.@"or",
@@ -1680,25 +1708,26 @@ fn genIntMulDivOpMir(
try self.genSetReg(ty, .rax, lhs);
}
- switch (signedness) {
- .signed => {
- _ = try self.addInst(.{
- .tag = .cwd,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
- .data = undefined,
- });
- },
- .unsigned => {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rdx,
- .reg2 = .rdx,
- }),
- .data = undefined,
- });
- },
- }
+ _ = signedness;
+ // switch (signedness) {
+ // .signed => {
+ // _ = try self.addInst(.{
+ // .tag = .cwd,
+ // .ops = Mir.Inst.Ops.encode(.{ .flags = 0b11 }),
+ // .data = undefined,
+ // });
+ // },
+ // .unsigned => {
+ // _ = try self.addInst(.{
+ // .tag = .xor,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rdx,
+ // .reg2 = .rdx,
+ // }),
+ // .data = undefined,
+ // });
+ // },
+ // }
const factor = switch (rhs) {
.register => rhs,
@@ -1708,33 +1737,35 @@ fn genIntMulDivOpMir(
break :blk MCValue{ .register = reg };
},
};
-
- switch (factor) {
- .register => |reg| {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = undefined,
- });
- },
- .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg2 = .rbp,
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- 8 => 0b11,
- else => unreachable,
- },
- }),
- .data = .{ .disp = -off },
- });
- },
- else => unreachable,
- }
+ _ = factor;
+ _ = tag;
+
+ // switch (factor) {
+ // .register => |reg| {
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
+ // .data = undefined,
+ // });
+ // },
+ // .stack_offset => |off| {
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg2 = .rbp,
+ // .flags = switch (abi_size) {
+ // 1 => 0b00,
+ // 2 => 0b01,
+ // 4 => 0b10,
+ // 8 => 0b11,
+ // else => unreachable,
+ // },
+ // }),
+ // .data = .{ .disp = -off },
+ // });
+ // },
+ // else => unreachable,
+ // }
}
/// Always returns a register.
@@ -1760,38 +1791,38 @@ fn genInlineIntDivFloor(self: *Self, ty: Type, lhs: MCValue, rhs: MCValue) !MCVa
.unsigned => .div,
}, Type.isize, signedness, .{ .register = dividend }, .{ .register = divisor });
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = divisor.to64(),
- .reg2 = dividend.to64(),
- }),
- .data = undefined,
- });
- _ = try self.addInst(.{
- .tag = .sar,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = divisor.to64(),
- .flags = 0b10,
- }),
- .data = .{ .imm = 63 },
- });
- _ = try self.addInst(.{
- .tag = .@"test",
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rdx,
- .reg2 = .rdx,
- }),
- .data = undefined,
- });
- _ = try self.addInst(.{
- .tag = .cond_mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = divisor.to64(),
- .reg2 = .rdx,
- }),
- .data = .{ .cc = .e },
- });
+ // _ = try self.addInst(.{
+ // .tag = .xor,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = divisor.to64(),
+ // .reg2 = dividend.to64(),
+ // }),
+ // .data = undefined,
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .sar,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = divisor.to64(),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .imm = 63 },
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .@"test",
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rdx,
+ // .reg2 = .rdx,
+ // }),
+ // .data = undefined,
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .cond_mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = divisor.to64(),
+ // .reg2 = .rdx,
+ // }),
+ // .data = .{ .cc = .e },
+ // });
try self.genBinOpMir(.add, Type.isize, .{ .register = divisor }, .{ .register = .rax });
return MCValue{ .register = divisor };
}
@@ -2226,16 +2257,17 @@ fn genSliceElemPtr(self: *Self, lhs: Air.Inst.Ref, rhs: Air.Inst.Ref) !MCValue {
const addr_reg = try self.register_manager.allocReg(null, gp);
switch (slice_mcv) {
.stack_offset => |off| {
+ _ = off;
// mov reg, [rbp - 8]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .disp = -@intCast(i32, off) },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg.to64(),
+ // .reg2 = .rbp,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = -@intCast(i32, off) },
+ // });
},
else => return self.fail("TODO implement slice_elem_ptr when slice is {}", .{slice_mcv}),
}
@@ -2312,25 +2344,26 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
));
try self.genSetStack(array_ty, off, array, .{});
// lea reg, [rbp]
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = .rbp,
- }),
- .data = .{ .disp = -off },
- });
+ // _ = try self.addInst(.{
+ // .tag = .lea,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg.to64(),
+ // .reg2 = .rbp,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.stack_offset => |off| {
+ _ = off;
// lea reg, [rbp]
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = .rbp,
- }),
- .data = .{ .disp = -off },
- });
+ // _ = try self.addInst(.{
+ // .tag = .lea,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg.to64(),
+ // .reg2 = .rbp,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.memory, .linker_load => {
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array);
@@ -2388,15 +2421,15 @@ fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("TODO copy value with size {} from pointer", .{elem_abi_size});
} else {
// mov dst_mcv, [dst_mcv]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)),
- .reg2 = dst_mcv.register,
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)),
+ // .reg2 = dst_mcv.register,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
break :result .{ .register = registerAlias(dst_mcv.register, @intCast(u32, elem_abi_size)) };
}
};
@@ -2650,16 +2683,17 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
.undef => unreachable,
.eflags => unreachable,
.register => |dst_reg| {
+ _ = dst_reg;
// mov dst_reg, [reg]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, @intCast(u32, abi_size)),
- .reg2 = reg,
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_reg, @intCast(u32, abi_size)),
+ // .reg2 = reg,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
},
.stack_offset => |off| {
if (abi_size <= 8) {
@@ -2724,19 +2758,22 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
.direct => 0b01,
.import => 0b10,
};
- _ = try self.addInst(.{
- .tag = .lea_pic,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = flags,
- }),
- .data = .{
- .relocation = .{
- .atom_index = atom_index,
- .sym_index = load_struct.sym_index,
- },
- },
- });
+ _ = abi_size;
+ _ = atom_index;
+ _ = flags;
+ // _ = try self.addInst(.{
+ // .tag = .lea_pic,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .flags = flags,
+ // }),
+ // .data = .{
+ // .relocation = .{
+ // .atom_index = atom_index,
+ // .sym_index = load_struct.sym_index,
+ // },
+ // },
+ // });
},
.memory => |addr| {
// TODO: in case the address fits in an imm32 we can use [ds:imm32]
@@ -2779,27 +2816,28 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
try self.genSetReg(value_ty, reg, value);
},
.immediate => |imm| {
+ _ = imm;
switch (abi_size) {
1, 2, 4 => {
// TODO this is wasteful!
// introduce new MIR tag specifically for mov [reg + 0], imm
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = 0,
- .operand = @truncate(u32, imm),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- else => unreachable,
- },
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = 0,
+ // .operand = @truncate(u32, imm),
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to64(),
+ // .flags = switch (abi_size) {
+ // 1 => 0b00,
+ // 2 => 0b01,
+ // 4 => 0b10,
+ // else => unreachable,
+ // },
+ // }),
+ // .data = .{ .payload = payload },
+ // });
},
8 => {
// TODO: optimization: if the imm is only using the lower
@@ -2829,13 +2867,13 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
const overflow_bit_ty = value_ty.structFieldType(1);
const overflow_bit_offset = value_ty.structFieldOffset(1, self.target.*);
const tmp_reg = try self.register_manager.allocReg(null, gp);
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg.to8(),
- }),
- .data = .{ .cc = ro.eflags },
- });
+ // _ = try self.addInst(.{
+ // .tag = .cond_set_byte,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = tmp_reg.to8(),
+ // }),
+ // .data = .{ .cc = ro.eflags },
+ // });
try self.genInlineMemcpyRegisterRegister(
overflow_bit_ty,
reg,
@@ -2878,15 +2916,15 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
// to get the actual address of the value we want to modify we have to go through the GOT
// mov reg, [reg]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = addr_reg.to64(),
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg.to64(),
+ // .reg2 = addr_reg.to64(),
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
const new_ptr = MCValue{ .register = addr_reg.to64() };
@@ -2896,11 +2934,11 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
return self.fail("TODO saving imm to memory for abi_size {}", .{abi_size});
}
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = 0,
- // TODO check if this logic is correct
- .operand = @intCast(u32, imm),
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = 0,
+ // // TODO check if this logic is correct
+ // .operand = @intCast(u32, imm),
+ // });
const flags: u2 = switch (abi_size) {
1 => 0b00,
2 => 0b01,
@@ -2919,14 +2957,14 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
return self.fail("TODO imm64 would get incorrectly sign extended", .{});
}
}
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .flags = flags,
- }),
- .data = .{ .payload = payload },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg.to64(),
+ // .flags = flags,
+ // }),
+ // .data = .{ .payload = payload },
+ // });
},
.register => {
return self.store(new_ptr, value, ptr_ty, value_ty);
@@ -2939,15 +2977,15 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
try self.loadMemPtrIntoRegister(tmp_reg, value_ty, value);
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg,
- .reg2 = tmp_reg,
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = tmp_reg,
+ // .reg2 = tmp_reg,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}
@@ -3109,14 +3147,14 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
};
const field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*));
if (signedness == .signed and field_size < 8) {
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_mcv.register,
- .reg2 = registerAlias(dst_mcv.register, field_size),
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_sign_extend,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_mcv.register,
+ // .reg2 = registerAlias(dst_mcv.register, field_size),
+ // }),
+ // .data = undefined,
+ // });
}
break :result dst_mcv;
@@ -3133,13 +3171,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
defer self.register_manager.unlockReg(reg_lock);
const dst_reg = try self.register_manager.allocReg(inst, gp);
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg.to8(),
- }),
- .data = .{ .cc = ro.eflags },
- });
+ // _ = try self.addInst(.{
+ // .tag = .cond_set_byte,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_reg.to8(),
+ // }),
+ // .data = .{ .cc = ro.eflags },
+ // });
break :result MCValue{ .register = dst_reg.to8() };
},
else => unreachable,
@@ -3176,22 +3214,22 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
.immediate => |imm| switch (imm) {
0 => return,
1 => {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
+ // .data = undefined,
+ // });
return;
},
else => {
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b10,
- }),
- .data = .{ .imm = @intCast(u8, imm) },
- });
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .imm = @intCast(u8, imm) },
+ // });
return;
},
},
@@ -3204,15 +3242,16 @@ fn genShiftBinOpMir(self: *Self, tag: Mir.Inst.Tag, ty: Type, reg: Register, shi
try self.register_manager.getReg(.rcx, null);
try self.genSetReg(Type.u8, .rcx, shift);
}
+ _ = abi_size;
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .flags = 0b01,
+ // }),
+ // .data = undefined,
+ // });
}
/// Result is always a register.
@@ -3583,49 +3622,51 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.register => |src_reg| switch (dst_ty.zigTypeTag()) {
.Float => {
if (intrinsicsAllowed(self.target.*, dst_ty)) {
- const actual_tag: Mir.Inst.Tag = switch (dst_ty.tag()) {
- .f32 => switch (mir_tag) {
- .add => Mir.Inst.Tag.add_f32,
- .cmp => Mir.Inst.Tag.cmp_f32,
- else => return self.fail("TODO genBinOpMir for f32 register-register with MIR tag {}", .{mir_tag}),
- },
- .f64 => switch (mir_tag) {
- .add => Mir.Inst.Tag.add_f64,
- .cmp => Mir.Inst.Tag.cmp_f64,
- else => return self.fail("TODO genBinOpMir for f64 register-register with MIR tag {}", .{mir_tag}),
- },
- else => return self.fail("TODO genBinOpMir for float register-register and type {}", .{dst_ty.fmtDebug()}),
- };
- _ = try self.addInst(.{
- .tag = actual_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg.to128(),
- .reg2 = src_reg.to128(),
- }),
- .data = undefined,
- });
+ // const actual_tag: Mir.Inst.Tag = switch (dst_ty.tag()) {
+ // .f32 => switch (mir_tag) {
+ // .add => Mir.Inst.Tag.add_f32,
+ // .cmp => Mir.Inst.Tag.cmp_f32,
+ // else => return self.fail("TODO genBinOpMir for f32 register-register with MIR tag {}", .{mir_tag}),
+ // },
+ // .f64 => switch (mir_tag) {
+ // .add => Mir.Inst.Tag.add_f64,
+ // .cmp => Mir.Inst.Tag.cmp_f64,
+ // else => return self.fail("TODO genBinOpMir for f64 register-register with MIR tag {}", .{mir_tag}),
+ // },
+ // else => return self.fail("TODO genBinOpMir for float register-register and type {}", .{dst_ty.fmtDebug()}),
+ // };
+ // _ = try self.addInst(.{
+ // .tag = actual_tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_reg.to128(),
+ // .reg2 = src_reg.to128(),
+ // }),
+ // .data = undefined,
+ // });
return;
}
return self.fail("TODO genBinOpMir for float register-register and no intrinsics", .{});
},
else => {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ _ = src_reg;
+ // _ = try self.addInst(.{
+ // .tag = mir_tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_reg, abi_size),
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
},
},
.immediate => |imm| {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size) }),
- .data = .{ .imm = @intCast(u32, imm) },
- });
+ _ = imm;
+ // _ = try self.addInst(.{
+ // .tag = mir_tag,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(dst_reg, abi_size) }),
+ // .data = .{ .imm = @intCast(u32, imm) },
+ // });
},
.memory,
.linker_load,
@@ -3642,15 +3683,15 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
if (off > math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .disp = -off },
- });
+ // _ = try self.addInst(.{
+ // .tag = mir_tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_reg, abi_size),
+ // .reg2 = .rbp,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
}
},
@@ -3668,26 +3709,28 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
.dead, .unreach => unreachable,
.register_overflow => unreachable,
.register => |src_reg| {
- _ = try self.addInst(.{
- .tag = mir_tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .reg2 = registerAlias(src_reg, abi_size),
- .flags = 0b10,
- }),
- .data = .{ .disp = -off },
- });
+ _ = src_reg;
+ // _ = try self.addInst(.{
+ // .tag = mir_tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rbp,
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.immediate => |imm| {
- const tag: Mir.Inst.Tag = switch (mir_tag) {
- .add => .add_mem_imm,
- .@"or" => .or_mem_imm,
- .@"and" => .and_mem_imm,
- .sub => .sub_mem_imm,
- .xor => .xor_mem_imm,
- .cmp => .cmp_mem_imm,
- else => unreachable,
- };
+ _ = imm;
+ // const tag: Mir.Inst.Tag = switch (mir_tag) {
+ // .add => .add_mem_imm,
+ // .@"or" => .or_mem_imm,
+ // .@"and" => .and_mem_imm,
+ // .sub => .sub_mem_imm,
+ // .xor => .xor_mem_imm,
+ // .cmp => .cmp_mem_imm,
+ // else => unreachable,
+ // };
const flags: u2 = switch (abi_size) {
1 => 0b00,
2 => 0b01,
@@ -3695,18 +3738,19 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
8 => 0b11,
else => unreachable,
};
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = -off,
- .operand = @intCast(u32, imm),
- });
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .flags = flags,
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = -off,
+ // .operand = @intCast(u32, imm),
+ // });
+ _ = flags;
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rbp,
+ // .flags = flags,
+ // }),
+ // .data = .{ .payload = payload },
+ // });
},
.memory,
.stack_offset,
@@ -3735,6 +3779,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu
/// Does not support byte-size operands.
fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue) InnerError!void {
const abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
+ _ = abi_size;
switch (dst_mcv) {
.none => unreachable,
.undef => unreachable,
@@ -3750,29 +3795,30 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.ptr_stack_offset => unreachable,
.register_overflow => unreachable,
.register => |src_reg| {
+ _ = src_reg;
// register, register
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .imul_complex,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_reg, abi_size),
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
},
.immediate => |imm| {
// TODO take into account the type's ABI size when selecting the register alias
// register, immediate
if (math.minInt(i32) <= imm and imm <= math.maxInt(i32)) {
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg.to32(),
- .reg2 = dst_reg.to32(),
- .flags = 0b10,
- }),
- .data = .{ .imm = @intCast(u32, imm) },
- });
+ // _ = try self.addInst(.{
+ // .tag = .imul_complex,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_reg.to32(),
+ // .reg2 = dst_reg.to32(),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .imm = @intCast(u32, imm) },
+ // });
} else {
// TODO verify we don't spill and assign to the same register as dst_mcv
const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv);
@@ -3780,15 +3826,16 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
}
},
.stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .disp = -off },
- });
+ _ = off;
+ // _ = try self.addInst(.{
+ // .tag = .imul_complex,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_reg, abi_size),
+ // .reg2 = .rbp,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.memory => {
return self.fail("TODO implement x86 multiply source memory", .{});
@@ -3811,16 +3858,17 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.register => |src_reg| {
// copy dst to a register
const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv);
+ _ = src_reg;
// multiply into dst_reg
// register, register
- _ = try self.addInst(.{
- .tag = .imul_complex,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .imul_complex,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_reg, abi_size),
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
// copy dst_reg back out
return self.genSetStack(dst_ty, off, .{ .register = dst_reg }, .{});
},
@@ -3946,20 +3994,20 @@ fn genVarDbgInfo(
}
fn airTrap(self: *Self) !void {
- _ = try self.addInst(.{
- .tag = .ud,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .ud,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = undefined,
+ // });
return self.finishAirBookkeeping();
}
fn airBreakpoint(self: *Self) !void {
- _ = try self.addInst(.{
- .tag = .interrupt,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .interrupt,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = undefined,
+ // });
return self.finishAirBookkeeping();
}
@@ -4054,11 +4102,11 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (info.stack_byte_count > 0) {
// Adjust the stack
- _ = try self.addInst(.{
- .tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = info.stack_byte_count },
- });
+ // _ = try self.addInst(.{
+ // .tag = .sub,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
+ // .data = .{ .imm = info.stack_byte_count },
+ // });
}
// Due to incremental compilation, how function calls are generated depends
@@ -4072,11 +4120,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
const got_addr = @intCast(i32, atom.getOffsetTableAddress(elf_file));
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
- .data = .{ .disp = got_addr },
- });
+ _ = got_addr;
+ // _ = try self.addInst(.{
+ // .tag = .call,
+ // .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
+ // .data = .{ .disp = got_addr },
+ // });
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
@@ -4086,14 +4135,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .call,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rax,
+ // .flags = 0b01,
+ // }),
+ // .data = undefined,
+ // });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
@@ -4103,14 +4152,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .call,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rax,
+ // .flags = 0b01,
+ // }),
+ // .data = undefined,
+ // });
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
const decl_block_index = try p9.seeDecl(func.owner_decl);
const decl_block = p9.getDeclBlock(decl_block_index);
@@ -4119,11 +4168,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
const got_addr = p9.bases.data;
const got_index = decl_block.got_index.?;
const fn_got_addr = got_addr + got_index * ptr_bytes;
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
- .data = .{ .disp = @intCast(i32, fn_got_addr) },
- });
+ _ = fn_got_addr;
+ // _ = try self.addInst(.{
+ // .tag = .call,
+ // .ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
+ // .data = .{ .disp = @intCast(i32, fn_got_addr) },
+ // });
} else unreachable;
} else if (func_value.castTag(.extern_fn)) |func_payload| {
const extern_fn = func_payload.data;
@@ -4143,26 +4193,28 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.sym_index = sym_index,
},
});
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .call,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rax,
+ // .flags = 0b01,
+ // }),
+ // .data = undefined,
+ // });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const sym_index = try macho_file.getGlobalSymbol(mem.sliceTo(decl_name, 0));
const atom = try macho_file.getOrCreateAtomForDecl(self.mod_fn.owner_decl);
const atom_index = macho_file.getAtom(atom).getSymbolIndex().?;
- _ = try self.addInst(.{
- .tag = .call_extern,
- .ops = undefined,
- .data = .{ .relocation = .{
- .atom_index = atom_index,
- .sym_index = sym_index,
- } },
- });
+ _ = sym_index;
+ _ = atom_index;
+ // _ = try self.addInst(.{
+ // .tag = .call_extern,
+ // .ops = undefined,
+ // .data = .{ .relocation = .{
+ // .atom_index = atom_index,
+ // .sym_index = sym_index,
+ // } },
+ // });
} else {
return self.fail("TODO implement calling extern functions", .{});
}
@@ -4173,23 +4225,23 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
- _ = try self.addInst(.{
- .tag = .call,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01,
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .call,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rax,
+ // .flags = 0b01,
+ // }),
+ // .data = undefined,
+ // });
}
if (info.stack_byte_count > 0) {
// Readjust the stack
- _ = try self.addInst(.{
- .tag = .add,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
- .data = .{ .imm = info.stack_byte_count },
- });
+ // _ = try self.addInst(.{
+ // .tag = .add,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = .rsp }),
+ // .data = .{ .imm = info.stack_byte_count },
+ // });
}
const result: MCValue = result: {
@@ -4246,12 +4298,12 @@ fn airRet(self: *Self, inst: Air.Inst.Index) !void {
// TODO when implementing defer, this will need to jump to the appropriate defer expression.
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
- const jmp_reloc = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = undefined },
- });
- try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
+ // const jmp_reloc = try self.addInst(.{
+ // .tag = .jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst = undefined },
+ // });
+ // try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
@@ -4282,12 +4334,12 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
// TODO when implementing defer, this will need to jump to the appropriate defer expression.
// TODO optimization opportunity: figure out when we can emit this as a 2 byte instruction
// which is available if the jump is 127 bytes or less forward.
- const jmp_reloc = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = undefined },
- });
- try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
+ // const jmp_reloc = try self.addInst(.{
+ // .tag = .jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst = undefined },
+ // });
+ // try self.exitlude_jump_relocs.append(self.gpa, jmp_reloc);
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
@@ -4461,33 +4513,35 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
const abi_size = ty.abiSize(self.target.*);
switch (mcv) {
.eflags => |cc| {
- return self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{
- .inst_cc = .{
- .inst = undefined,
- // Here we map the opposites since the jump is to the false branch.
- .cc = cc.negate(),
- },
- },
- });
+ _ = cc;
+ // return self.addInst(.{
+ // .tag = .cond_jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{
+ // .inst_cc = .{
+ // .inst = undefined,
+ // // Here we map the opposites since the jump is to the false branch.
+ // .cc = cc.negate(),
+ // },
+ // },
+ // });
},
.register => |reg| {
+ _ = reg;
try self.spillEflagsIfOccupied();
- _ = try self.addInst(.{
- .tag = .@"test",
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = .{ .imm = 1 },
- });
- return self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .e,
- } },
- });
+ // _ = try self.addInst(.{
+ // .tag = .@"test",
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
+ // .data = .{ .imm = 1 },
+ // });
+ // return self.addInst(.{
+ // .tag = .cond_jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst_cc = .{
+ // .inst = undefined,
+ // .cc = .e,
+ // } },
+ // });
},
.immediate,
.stack_offset,
@@ -4501,6 +4555,7 @@ fn genCondBrMir(self: *Self, ty: Type, mcv: MCValue) !u32 {
},
else => return self.fail("TODO implement condbr when condition is {s}", .{@tagName(mcv)}),
}
+ return 0; // TODO
}
fn airCondBr(self: *Self, inst: Air.Inst.Index) !void {
@@ -4825,12 +4880,13 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
const jmp_target = @intCast(u32, self.mir_instructions.len);
+ _ = jmp_target;
try self.genBody(body);
- _ = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = jmp_target },
- });
+ // _ = try self.addInst(.{
+ // .tag = .jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst = jmp_target },
+ // });
return self.finishAirBookkeeping();
}
@@ -4876,21 +4932,23 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
.undef => unreachable,
.dead, .unreach => unreachable,
.immediate => |imm| {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size) }),
- .data = .{ .imm = @intCast(u32, imm) },
- });
+ _ = imm;
+ // _ = try self.addInst(.{
+ // .tag = .xor,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(cond_reg, abi_size) }),
+ // .data = .{ .imm = @intCast(u32, imm) },
+ // });
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(cond_reg, abi_size),
- .reg2 = registerAlias(reg, abi_size),
- }),
- .data = undefined,
- });
+ _ = reg;
+ // _ = try self.addInst(.{
+ // .tag = .xor,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(cond_reg, abi_size),
+ // .reg2 = registerAlias(reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
},
.stack_offset => {
if (abi_size <= 8) {
@@ -4905,22 +4963,22 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
},
}
- _ = try self.addInst(.{
- .tag = .@"test",
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(cond_reg, abi_size),
- .reg2 = registerAlias(cond_reg, abi_size),
- }),
- .data = undefined,
- });
- return self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .ne,
- } },
- });
+ // _ = try self.addInst(.{
+ // .tag = .@"test",
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(cond_reg, abi_size),
+ // .reg2 = registerAlias(cond_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
+ // return self.addInst(.{
+ // .tag = .cond_jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst_cc = .{
+ // .inst = undefined,
+ // .cc = .ne,
+ // } },
+ // });
},
.stack_offset => {
try self.spillEflagsIfOccupied();
@@ -4938,6 +4996,7 @@ fn genCondSwitchMir(self: *Self, ty: Type, condition: MCValue, case: MCValue) !u
return self.fail("TODO implemenent switch mir when condition is {}", .{condition});
},
}
+ return 0; // TODO
}
fn airSwitch(self: *Self, inst: Air.Inst.Index) !void {
@@ -5132,9 +5191,9 @@ fn canonicaliseBranches(self: *Self, parent_branch: *Branch, canon_branch: *Bran
fn performReloc(self: *Self, reloc: Mir.Inst.Index) !void {
const next_inst = @intCast(u32, self.mir_instructions.len);
switch (self.mir_instructions.items(.tag)[reloc]) {
- .cond_jmp => {
- self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
- },
+ // .cond_jmp => {
+ // self.mir_instructions.items(.data)[reloc].inst_cc.inst = next_inst;
+ // },
.jmp => {
self.mir_instructions.items(.data)[reloc].inst = next_inst;
},
@@ -5177,12 +5236,12 @@ fn brVoid(self: *Self, block: Air.Inst.Index) !void {
// Emit a jump with a relocation. It will be patched up after the block ends.
try block_data.relocs.ensureUnusedCapacity(self.gpa, 1);
// Leave the jump offset undefined
- const jmp_reloc = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = undefined },
- });
- block_data.relocs.appendAssumeCapacity(jmp_reloc);
+ // const jmp_reloc = try self.addInst(.{
+ // .tag = .jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst = undefined },
+ // });
+ // block_data.relocs.appendAssumeCapacity(jmp_reloc);
}
fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
@@ -5254,30 +5313,22 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
var iter = std.mem.tokenize(u8, asm_source, "\n\r");
while (iter.next()) |ins| {
if (mem.eql(u8, ins, "syscall")) {
- _ = try self.addInst(.{
- .tag = .syscall,
- .ops = undefined,
- .data = undefined,
- });
+ try self.assemble(.syscall, .{});
} else if (mem.indexOf(u8, ins, "push")) |_| {
const arg = ins[4..];
if (mem.indexOf(u8, arg, "$")) |l| {
const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch {
return self.fail("TODO implement more inline asm int parsing", .{});
};
- _ = try self.addInst(.{
- .tag = .push,
- .ops = Mir.Inst.Ops.encode(.{ .flags = 0b10 }),
- .data = .{ .imm = n },
+ try self.assemble(.push, .{
+ .op1 = .{ .imm = Mir.Operand.Immediate.u(n) },
});
} else if (mem.indexOf(u8, arg, "%%")) |l| {
const reg_name = ins[4 + l + 2 ..];
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
- _ = try self.addInst(.{
- .tag = .push,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = undefined,
+ try self.assemble(.push, .{
+ .op1 = .{ .reg = reg },
});
} else return self.fail("TODO more push operands", .{});
} else if (mem.indexOf(u8, ins, "pop")) |_| {
@@ -5286,10 +5337,8 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const reg_name = ins[3 + l + 2 ..];
const reg = parseRegName(reg_name) orelse
return self.fail("unrecognized register: '{s}'", .{reg_name});
- _ = try self.addInst(.{
- .tag = .pop,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg }),
- .data = undefined,
+ try self.assemble(.pop, .{
+ .op1 = .{ .reg = reg },
});
} else return self.fail("TODO more pop operands", .{});
} else {
@@ -5433,39 +5482,40 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE
switch (ty.zigTypeTag()) {
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
- const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => Mir.Inst.Tag.mov_f32,
- .f64 => Mir.Inst.Tag.mov_f64,
- else => return self.fail("TODO genSetStackArg for register for type {}", .{ty.fmtDebug()}),
- };
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = switch (ty.tag()) {
- .f32 => .esp,
- .f64 => .rsp,
- else => unreachable,
- },
- .reg2 = reg.to128(),
- .flags = 0b01,
- }),
- .data = .{ .disp = -stack_offset },
- });
+ // const tag: Mir.Inst.Tag = switch (ty.tag()) {
+ // .f32 => Mir.Inst.Tag.mov_f32,
+ // .f64 => Mir.Inst.Tag.mov_f64,
+ // else => return self.fail("TODO genSetStackArg for register for type {}", .{ty.fmtDebug()}),
+ // };
+ _ = reg;
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = switch (ty.tag()) {
+ // .f32 => .esp,
+ // .f64 => .rsp,
+ // else => unreachable,
+ // },
+ // .reg2 = reg.to128(),
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = -stack_offset },
+ // });
return;
}
return self.fail("TODO genSetStackArg for register with no intrinsics", .{});
},
else => {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rsp,
- .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
- .flags = 0b10,
- }),
- .data = .{ .disp = -stack_offset },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rsp,
+ // .reg2 = registerAlias(reg, @intCast(u32, abi_size)),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .disp = -stack_offset },
+ // });
},
}
},
@@ -5519,13 +5569,13 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
const overflow_bit_ty = ty.structFieldType(1);
const overflow_bit_offset = ty.structFieldOffset(1, self.target.*);
const tmp_reg = try self.register_manager.allocReg(null, gp);
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg.to8(),
- }),
- .data = .{ .cc = ro.eflags },
- });
+ // _ = try self.addInst(.{
+ // .tag = .cond_set_byte,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = tmp_reg.to8(),
+ // }),
+ // .data = .{ .cc = ro.eflags },
+ // });
return self.genSetStack(
overflow_bit_ty,
@@ -5539,72 +5589,74 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
return self.genSetStack(ty, stack_offset, .{ .register = reg }, opts);
},
.immediate => |x_big| {
+ _ = x_big;
const base_reg = opts.dest_stack_base orelse .rbp;
+ _ = base_reg;
switch (abi_size) {
0 => {
assert(ty.isError());
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = -stack_offset,
- .operand = @truncate(u32, x_big),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = 0b00,
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = -stack_offset,
+ // .operand = @truncate(u32, x_big),
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = base_reg,
+ // .flags = 0b00,
+ // }),
+ // .data = .{ .payload = payload },
+ // });
},
1, 2, 4 => {
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = -stack_offset,
- .operand = @truncate(u32, x_big),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = switch (abi_size) {
- 1 => 0b00,
- 2 => 0b01,
- 4 => 0b10,
- else => unreachable,
- },
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = -stack_offset,
+ // .operand = @truncate(u32, x_big),
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = base_reg,
+ // .flags = switch (abi_size) {
+ // 1 => 0b00,
+ // 2 => 0b01,
+ // 4 => 0b10,
+ // else => unreachable,
+ // },
+ // }),
+ // .data = .{ .payload = payload },
+ // });
},
8 => {
// 64 bit write to memory would take two mov's anyways so we
// insted just use two 32 bit writes to avoid register allocation
{
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = -stack_offset + 4,
- .operand = @truncate(u32, x_big >> 32),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = 0b10,
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = -stack_offset + 4,
+ // .operand = @truncate(u32, x_big >> 32),
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = base_reg,
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .payload = payload },
+ // });
}
{
- const payload = try self.addExtra(Mir.ImmPair{
- .dest_off = -stack_offset,
- .operand = @truncate(u32, x_big),
- });
- _ = try self.addInst(.{
- .tag = .mov_mem_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = base_reg,
- .flags = 0b10,
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.ImmPair{
+ // .dest_off = -stack_offset,
+ // .operand = @truncate(u32, x_big),
+ // });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = base_reg,
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .payload = payload },
+ // });
}
},
else => {
@@ -5622,24 +5674,24 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl
switch (ty.zigTypeTag()) {
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
- const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => Mir.Inst.Tag.mov_f32,
- .f64 => Mir.Inst.Tag.mov_f64,
- else => return self.fail("TODO genSetStack for register for type {}", .{ty.fmtDebug()}),
- };
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = switch (ty.tag()) {
- .f32 => base_reg.to32(),
- .f64 => base_reg.to64(),
- else => unreachable,
- },
- .reg2 = reg.to128(),
- .flags = 0b01,
- }),
- .data = .{ .disp = -stack_offset },
- });
+ // const tag: Mir.Inst.Tag = switch (ty.tag()) {
+ // .f32 => Mir.Inst.Tag.mov_f32,
+ // .f64 => Mir.Inst.Tag.mov_f64,
+ // else => return self.fail("TODO genSetStack for register for type {}", .{ty.fmtDebug()}),
+ // };
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = switch (ty.tag()) {
+ // .f32 => base_reg.to32(),
+ // .f64 => base_reg.to64(),
+ // else => unreachable,
+ // },
+ // .reg2 = reg.to128(),
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = -stack_offset },
+ // });
return;
}
@@ -5706,15 +5758,15 @@ fn genInlineMemcpyRegisterRegister(
while (remainder > 0) {
const nearest_power_of_two = @as(u6, 1) << math.log2_int(u3, @intCast(u3, remainder));
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg,
- .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
- .flags = 0b10,
- }),
- .data = .{ .disp = -next_offset },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_reg,
+ // .reg2 = registerAlias(tmp_reg, nearest_power_of_two),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .disp = -next_offset },
+ // });
if (nearest_power_of_two > 1) {
try self.genShiftBinOpMir(.shr, ty, tmp_reg, .{
@@ -5726,15 +5778,15 @@ fn genInlineMemcpyRegisterRegister(
next_offset -= nearest_power_of_two;
}
} else {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_reg,
- .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
- .flags = 0b10,
- }),
- .data = .{ .disp = -offset },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_reg,
+ // .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)),
+ // .flags = 0b10,
+ // }),
+ // .data = .{ .disp = -offset },
+ // });
}
}
@@ -5768,30 +5820,34 @@ fn genInlineMemcpy(
const index_reg = regs[2].to64();
const count_reg = regs[3].to64();
const tmp_reg = regs[4].to8();
+ _ = index_reg;
+ _ = tmp_reg;
switch (dst_ptr) {
.memory, .linker_load => {
try self.loadMemPtrIntoRegister(dst_addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_addr_reg.to64(),
- .reg2 = opts.dest_stack_base orelse .rbp,
- }),
- .data = .{ .disp = -off },
- });
+ _ = off;
+ // _ = try self.addInst(.{
+ // .tag = .lea,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_addr_reg.to64(),
+ // .reg2 = opts.dest_stack_base orelse .rbp,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(dst_addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
- .reg2 = reg,
- }),
- .data = undefined,
- });
+ _ = reg;
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(dst_addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
+ // .reg2 = reg,
+ // }),
+ // .data = undefined,
+ // });
},
else => {
return self.fail("TODO implement memcpy for setting stack when dest is {}", .{dst_ptr});
@@ -5803,24 +5859,26 @@ fn genInlineMemcpy(
try self.loadMemPtrIntoRegister(src_addr_reg, Type.usize, src_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = src_addr_reg.to64(),
- .reg2 = opts.source_stack_base orelse .rbp,
- }),
- .data = .{ .disp = -off },
- });
+ _ = off;
+ // _ = try self.addInst(.{
+ // .tag = .lea,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = src_addr_reg.to64(),
+ // .reg2 = opts.source_stack_base orelse .rbp,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(src_addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
- .reg2 = reg,
- }),
- .data = undefined,
- });
+ _ = reg;
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(src_addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
+ // .reg2 = reg,
+ // }),
+ // .data = undefined,
+ // });
},
else => {
return self.fail("TODO implement memcpy for setting stack when src is {}", .{src_ptr});
@@ -5830,73 +5888,73 @@ fn genInlineMemcpy(
try self.genSetReg(Type.usize, count_reg, len);
// mov index_reg, 0
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
+ // .data = .{ .imm = 0 },
+ // });
// loop:
// cmp count, 0
- const loop_start = try self.addInst(.{
- .tag = .cmp,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
- .data = .{ .imm = 0 },
- });
+ // const loop_start = try self.addInst(.{
+ // .tag = .cmp,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
+ // .data = .{ .imm = 0 },
+ // });
// je end
- const loop_reloc = try self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .e,
- } },
- });
+ // const loop_reloc = try self.addInst(.{
+ // .tag = .cond_jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst_cc = .{
+ // .inst = undefined,
+ // .cc = .e,
+ // } },
+ // });
// mov tmp, [addr + index_reg]
- _ = try self.addInst(.{
- .tag = .mov_scale_src,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = tmp_reg.to8(),
- .reg2 = src_addr_reg,
- }),
- .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_scale_src,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = tmp_reg.to8(),
+ // .reg2 = src_addr_reg,
+ // }),
+ // .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
+ // });
// mov [stack_offset + index_reg], tmp
- _ = try self.addInst(.{
- .tag = .mov_scale_dst,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = dst_addr_reg,
- .reg2 = tmp_reg.to8(),
- }),
- .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_scale_dst,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = dst_addr_reg,
+ // .reg2 = tmp_reg.to8(),
+ // }),
+ // .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDisp.encode(index_reg, 0)) },
+ // });
// add index_reg, 1
- _ = try self.addInst(.{
- .tag = .add,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = 1 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .add,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
+ // .data = .{ .imm = 1 },
+ // });
// sub count, 1
- _ = try self.addInst(.{
- .tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
- .data = .{ .imm = 1 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .sub,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = count_reg }),
+ // .data = .{ .imm = 1 },
+ // });
// jmp loop
- _ = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = loop_start },
- });
+ // _ = try self.addInst(.{
+ // .tag = .jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst = loop_start },
+ // });
// end:
- try self.performReloc(loop_reloc);
+ // try self.performReloc(loop_reloc);
}
fn genInlineMemset(
@@ -5927,24 +5985,26 @@ fn genInlineMemset(
try self.loadMemPtrIntoRegister(addr_reg, Type.usize, dst_ptr);
},
.ptr_stack_offset, .stack_offset => |off| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg.to64(),
- .reg2 = opts.dest_stack_base orelse .rbp,
- }),
- .data = .{ .disp = -off },
- });
+ _ = off;
+ // _ = try self.addInst(.{
+ // .tag = .lea,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg.to64(),
+ // .reg2 = opts.dest_stack_base orelse .rbp,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.register => |reg| {
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
- .reg2 = reg,
- }),
- .data = undefined,
- });
+ _ = reg;
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(addr_reg, @intCast(u32, @divExact(reg.bitSize(), 8))),
+ // .reg2 = reg,
+ // }),
+ // .data = undefined,
+ // });
},
else => {
return self.fail("TODO implement memcpy for setting stack when dest is {}", .{dst_ptr});
@@ -5956,24 +6016,24 @@ fn genInlineMemset(
// loop:
// cmp index_reg, -1
- const loop_start = try self.addInst(.{
- .tag = .cmp,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = index_reg,
- .flags = 0b11,
- }),
- .data = .{ .imm_s = -1 },
- });
+ // const loop_start = try self.addInst(.{
+ // .tag = .cmp,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = index_reg,
+ // .flags = 0b11,
+ // }),
+ // .data = .{ .imm_s = -1 },
+ // });
// je end
- const loop_reloc = try self.addInst(.{
- .tag = .cond_jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst_cc = .{
- .inst = undefined,
- .cc = .e,
- } },
- });
+ // const loop_reloc = try self.addInst(.{
+ // .tag = .cond_jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst_cc = .{
+ // .inst = undefined,
+ // .cc = .e,
+ // } },
+ // });
switch (value) {
.immediate => |x| {
@@ -5981,37 +6041,37 @@ fn genInlineMemset(
return self.fail("TODO inline memset for value immediate larger than 32bits", .{});
}
// mov byte ptr [rbp + index_reg + stack_offset], imm
- _ = try self.addInst(.{
- .tag = .mov_mem_index_imm,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = addr_reg,
- }),
- .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDispImm.encode(
- index_reg,
- 0,
- @intCast(u32, x),
- )) },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_mem_index_imm,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = addr_reg,
+ // }),
+ // .data = .{ .payload = try self.addExtra(Mir.IndexRegisterDispImm.encode(
+ // index_reg,
+ // 0,
+ // @intCast(u32, x),
+ // )) },
+ // });
},
else => return self.fail("TODO inline memset for value of type {}", .{value}),
}
// sub index_reg, 1
- _ = try self.addInst(.{
- .tag = .sub,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
- .data = .{ .imm = 1 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .sub,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = index_reg }),
+ // .data = .{ .imm = 1 },
+ // });
// jmp loop
- _ = try self.addInst(.{
- .tag = .jmp,
- .ops = Mir.Inst.Ops.encode(.{}),
- .data = .{ .inst = loop_start },
- });
+ // _ = try self.addInst(.{
+ // .tag = .jmp,
+ // .ops = Mir.Inst.Ops.encode(.{}),
+ // .data = .{ .inst = loop_start },
+ // });
// end:
- try self.performReloc(loop_reloc);
+ // try self.performReloc(loop_reloc);
}
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void {
@@ -6023,14 +6083,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) {
return self.fail("stack offset too large", .{});
}
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = .rbp,
- }),
- .data = .{ .disp = -off },
- });
+ // _ = try self.addInst(.{
+ // .tag = .lea,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .reg2 = .rbp,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
.unreach, .none => return, // Nothing to do.
.undef => {
@@ -6046,34 +6106,30 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
}
},
.eflags => |cc| {
- _ = try self.addInst(.{
- .tag = .cond_set_byte,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to8(),
- }),
- .data = .{ .cc = cc },
- });
+ _ = cc;
+ // _ = try self.addInst(.{
+ // .tag = .cond_set_byte,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to8(),
+ // }),
+ // .data = .{ .cc = cc },
+ // });
},
.immediate => |x| {
// 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
// register is the fastest way to zero a register.
if (x == 0) {
- _ = try self.addInst(.{
- .tag = .xor,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to32(),
- .reg2 = reg.to32(),
- }),
- .data = undefined,
+ try self.assemble(.xor, .{
+ .op1 = .{ .reg = reg.to32() },
+ .op2 = .{ .reg = reg.to32() },
});
return;
}
if (x <= math.maxInt(i32)) {
// Next best case: if we set the lower four bytes, the upper four will be zeroed.
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = registerAlias(reg, abi_size) }),
- .data = .{ .imm = @intCast(u32, x) },
+ try self.assemble(.mov, .{
+ .op1 = .{ .reg = registerAlias(reg, abi_size) },
+ .op2 = .{ .imm = Mir.Operand.Immediate.u(@intCast(u32, x)) },
});
return;
}
@@ -6084,12 +6140,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
// This encoding is, in fact, the *same* as the one used for 32-bit loads. The only
// difference is that we set REX.W before the instruction, which extends the load to
// 64-bit and uses the full bit-width of the register.
- const payload = try self.addExtra(Mir.Imm64.encode(x));
- _ = try self.addInst(.{
- .tag = .movabs,
- .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64() }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.Imm64.encode(x));
+ // _ = try self.addInst(.{
+ // .tag = .movabs,
+ // .ops = Mir.Inst.Ops.encode(.{ .reg1 = reg.to64() }),
+ // .data = .{ .payload = payload },
+ // });
},
.register => |src_reg| {
// If the registers are the same, nothing to do.
@@ -6100,47 +6156,47 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
.Int => switch (ty.intInfo(self.target.*).signedness) {
.signed => {
if (abi_size <= 4) {
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_sign_extend,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to64(),
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
return;
}
},
.unsigned => {
if (abi_size <= 2) {
- _ = try self.addInst(.{
- .tag = .mov_zero_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov_zero_extend,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to64(),
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
return;
}
},
},
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
- const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => Mir.Inst.Tag.mov_f32,
- .f64 => Mir.Inst.Tag.mov_f64,
- else => return self.fail("TODO genSetReg from register for {}", .{ty.fmtDebug()}),
- };
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = src_reg.to128(),
- .flags = 0b10,
- }),
- .data = undefined,
- });
+ // const tag: Mir.Inst.Tag = switch (ty.tag()) {
+ // .f32 => Mir.Inst.Tag.mov_f32,
+ // .f64 => Mir.Inst.Tag.mov_f64,
+ // else => return self.fail("TODO genSetReg from register for {}", .{ty.fmtDebug()}),
+ // };
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to128(),
+ // .reg2 = src_reg.to128(),
+ // .flags = 0b10,
+ // }),
+ // .data = undefined,
+ // });
return;
}
@@ -6149,14 +6205,14 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
else => {},
}
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = registerAlias(src_reg, abi_size),
- }),
- .data = undefined,
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .reg2 = registerAlias(src_reg, abi_size),
+ // }),
+ // .data = undefined,
+ // });
},
.linker_load => {
switch (ty.zigTypeTag()) {
@@ -6165,24 +6221,24 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv);
if (intrinsicsAllowed(self.target.*, ty)) {
- const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => Mir.Inst.Tag.mov_f32,
- .f64 => Mir.Inst.Tag.mov_f64,
- else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
- };
-
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = switch (ty.tag()) {
- .f32 => base_reg.to32(),
- .f64 => base_reg.to64(),
- else => unreachable,
- },
- }),
- .data = .{ .disp = 0 },
- });
+ // const tag: Mir.Inst.Tag = switch (ty.tag()) {
+ // .f32 => Mir.Inst.Tag.mov_f32,
+ // .f64 => Mir.Inst.Tag.mov_f64,
+ // else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
+ // };
+
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to128(),
+ // .reg2 = switch (ty.tag()) {
+ // .f32 => base_reg.to32(),
+ // .f64 => base_reg.to64(),
+ // else => unreachable,
+ // },
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
return;
}
@@ -6190,15 +6246,15 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
},
else => {
try self.loadMemPtrIntoRegister(reg, Type.usize, mcv);
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = reg.to64(),
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .reg2 = reg.to64(),
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
},
}
},
@@ -6208,24 +6264,24 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
try self.loadMemPtrIntoRegister(base_reg, Type.usize, mcv);
if (intrinsicsAllowed(self.target.*, ty)) {
- const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => Mir.Inst.Tag.mov_f32,
- .f64 => Mir.Inst.Tag.mov_f64,
- else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
- };
-
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = switch (ty.tag()) {
- .f32 => base_reg.to32(),
- .f64 => base_reg.to64(),
- else => unreachable,
- },
- }),
- .data = .{ .disp = 0 },
- });
+ // const tag: Mir.Inst.Tag = switch (ty.tag()) {
+ // .f32 => Mir.Inst.Tag.mov_f32,
+ // .f64 => Mir.Inst.Tag.mov_f64,
+ // else => return self.fail("TODO genSetReg from memory for {}", .{ty.fmtDebug()}),
+ // };
+
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to128(),
+ // .reg2 = switch (ty.tag()) {
+ // .f32 => base_reg.to32(),
+ // .f64 => base_reg.to64(),
+ // else => unreachable,
+ // },
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
return;
}
@@ -6234,42 +6290,42 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
else => {
if (x <= math.maxInt(i32)) {
// mov reg, [ds:imm32]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .flags = 0b01,
- }),
- .data = .{ .disp = @intCast(i32, x) },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = @intCast(i32, x) },
+ // });
} else {
// If this is RAX, we can use a direct load.
// Otherwise, we need to load the address, then indirectly load the value.
if (reg.id() == 0) {
// movabs rax, ds:moffs64
- const payload = try self.addExtra(Mir.Imm64.encode(x));
- _ = try self.addInst(.{
- .tag = .movabs,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rax,
- .flags = 0b01, // imm64 will become moffs64
- }),
- .data = .{ .payload = payload },
- });
+ // const payload = try self.addExtra(Mir.Imm64.encode(x));
+ // _ = try self.addInst(.{
+ // .tag = .movabs,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rax,
+ // .flags = 0b01, // imm64 will become moffs64
+ // }),
+ // .data = .{ .payload = payload },
+ // });
} else {
// Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue.
try self.genSetReg(ty, reg, MCValue{ .immediate = x });
// mov reg, [reg + 0x0]
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = reg.to64(),
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .reg2 = reg.to64(),
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
}
}
},
@@ -6289,15 +6345,16 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
4 => 0b11,
else => unreachable,
};
- _ = try self.addInst(.{
- .tag = .mov_sign_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = .rbp,
- .flags = flags,
- }),
- .data = .{ .disp = -off },
- });
+ _ = flags;
+ // _ = try self.addInst(.{
+ // .tag = .mov_sign_extend,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to64(),
+ // .reg2 = .rbp,
+ // .flags = flags,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
return;
}
},
@@ -6308,38 +6365,39 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
2 => 0b10,
else => unreachable,
};
- _ = try self.addInst(.{
- .tag = .mov_zero_extend,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to64(),
- .reg2 = .rbp,
- .flags = flags,
- }),
- .data = .{ .disp = -off },
- });
+ _ = flags;
+ // _ = try self.addInst(.{
+ // .tag = .mov_zero_extend,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to64(),
+ // .reg2 = .rbp,
+ // .flags = flags,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
return;
}
},
},
.Float => {
if (intrinsicsAllowed(self.target.*, ty)) {
- const tag: Mir.Inst.Tag = switch (ty.tag()) {
- .f32 => Mir.Inst.Tag.mov_f32,
- .f64 => Mir.Inst.Tag.mov_f64,
- else => return self.fail("TODO genSetReg from stack offset for {}", .{ty.fmtDebug()}),
- };
- _ = try self.addInst(.{
- .tag = tag,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg.to128(),
- .reg2 = switch (ty.tag()) {
- .f32 => .ebp,
- .f64 => .rbp,
- else => unreachable,
- },
- }),
- .data = .{ .disp = -off },
- });
+ // const tag: Mir.Inst.Tag = switch (ty.tag()) {
+ // .f32 => Mir.Inst.Tag.mov_f32,
+ // .f64 => Mir.Inst.Tag.mov_f64,
+ // else => return self.fail("TODO genSetReg from stack offset for {}", .{ty.fmtDebug()}),
+ // };
+ // _ = try self.addInst(.{
+ // .tag = tag,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg.to128(),
+ // .reg2 = switch (ty.tag()) {
+ // .f32 => .ebp,
+ // .f64 => .rbp,
+ // else => unreachable,
+ // },
+ // }),
+ // .data = .{ .disp = -off },
+ // });
return;
}
return self.fail("TODO genSetReg from stack offset for float with no intrinsics", .{});
@@ -6347,15 +6405,15 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void
else => {},
}
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = registerAlias(reg, abi_size),
- .reg2 = .rbp,
- .flags = 0b01,
- }),
- .data = .{ .disp = -off },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = registerAlias(reg, abi_size),
+ // .reg2 = .rbp,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = -off },
+ // });
},
}
}
@@ -6419,6 +6477,7 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.air.typeOf(ty_op.operand);
const dst_ty = self.air.typeOfIndex(inst);
const operand = try self.resolveInst(ty_op.operand);
+ _ = dst_ty;
// move float src to ST(0)
const stack_offset = switch (operand) {
@@ -6433,34 +6492,35 @@ fn airFloatToInt(self: *Self, inst: Air.Inst.Index) !void {
break :blk offset;
},
};
- _ = try self.addInst(.{
- .tag = .fld,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .flags = switch (src_ty.abiSize(self.target.*)) {
- 4 => 0b01,
- 8 => 0b10,
- else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}),
- },
- }),
- .data = .{ .disp = -stack_offset },
- });
+ _ = stack_offset;
+ // _ = try self.addInst(.{
+ // .tag = .fld,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rbp,
+ // .flags = switch (src_ty.abiSize(self.target.*)) {
+ // 4 => 0b01,
+ // 8 => 0b10,
+ // else => |size| return self.fail("TODO load ST(0) with abiSize={}", .{size}),
+ // },
+ // }),
+ // .data = .{ .disp = -stack_offset },
+ // });
// convert
const stack_dst = try self.allocRegOrMem(inst, false);
- _ = try self.addInst(.{
- .tag = .fisttp,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = .rbp,
- .flags = switch (dst_ty.abiSize(self.target.*)) {
- 1...2 => 0b00,
- 3...4 => 0b01,
- 5...8 => 0b10,
- else => |size| return self.fail("TODO convert float with abiSize={}", .{size}),
- },
- }),
- .data = .{ .disp = -stack_dst.stack_offset },
- });
+ // _ = try self.addInst(.{
+ // .tag = .fisttp,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = .rbp,
+ // .flags = switch (dst_ty.abiSize(self.target.*)) {
+ // 1...2 => 0b00,
+ // 3...4 => 0b01,
+ // 5...8 => 0b10,
+ // else => |size| return self.fail("TODO convert float with abiSize={}", .{size}),
+ // },
+ // }),
+ // .data = .{ .disp = -stack_dst.stack_offset },
+ // });
return self.finishAir(inst, stack_dst, .{ ty_op.operand, .none, .none });
}
@@ -6551,15 +6611,15 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
.linker_load, .memory => {
const reg = try self.register_manager.allocReg(null, gp);
try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr);
- _ = try self.addInst(.{
- .tag = .mov,
- .ops = Mir.Inst.Ops.encode(.{
- .reg1 = reg,
- .reg2 = reg,
- .flags = 0b01,
- }),
- .data = .{ .disp = 0 },
- });
+ // _ = try self.addInst(.{
+ // .tag = .mov,
+ // .ops = Mir.Inst.Ops.encode(.{
+ // .reg1 = reg,
+ // .reg2 = reg,
+ // .flags = 0b01,
+ // }),
+ // .data = .{ .disp = 0 },
+ // });
break :blk MCValue{ .register = reg };
},
else => break :blk src_ptr,
src/arch/x86_64/Emit.zig
@@ -71,124 +71,53 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
const inst = @intCast(u32, index);
try emit.code_offset_mapping.putNoClobber(emit.bin_file.allocator, inst, emit.code.items.len);
switch (tag) {
- // GPR instructions
- .adc => try emit.mirArith(.adc, inst),
- .add => try emit.mirArith(.add, inst),
- .sub => try emit.mirArith(.sub, inst),
- .xor => try emit.mirArith(.xor, inst),
- .@"and" => try emit.mirArith(.@"and", inst),
- .@"or" => try emit.mirArith(.@"or", inst),
- .sbb => try emit.mirArith(.sbb, inst),
- .cmp => try emit.mirArith(.cmp, inst),
- .mov => try emit.mirArith(.mov, inst),
-
- .adc_mem_imm => try emit.mirArithMemImm(.adc, inst),
- .add_mem_imm => try emit.mirArithMemImm(.add, inst),
- .sub_mem_imm => try emit.mirArithMemImm(.sub, inst),
- .xor_mem_imm => try emit.mirArithMemImm(.xor, inst),
- .and_mem_imm => try emit.mirArithMemImm(.@"and", inst),
- .or_mem_imm => try emit.mirArithMemImm(.@"or", inst),
- .sbb_mem_imm => try emit.mirArithMemImm(.sbb, inst),
- .cmp_mem_imm => try emit.mirArithMemImm(.cmp, inst),
- .mov_mem_imm => try emit.mirArithMemImm(.mov, inst),
-
- .adc_scale_src => try emit.mirArithScaleSrc(.adc, inst),
- .add_scale_src => try emit.mirArithScaleSrc(.add, inst),
- .sub_scale_src => try emit.mirArithScaleSrc(.sub, inst),
- .xor_scale_src => try emit.mirArithScaleSrc(.xor, inst),
- .and_scale_src => try emit.mirArithScaleSrc(.@"and", inst),
- .or_scale_src => try emit.mirArithScaleSrc(.@"or", inst),
- .sbb_scale_src => try emit.mirArithScaleSrc(.sbb, inst),
- .cmp_scale_src => try emit.mirArithScaleSrc(.cmp, inst),
- .mov_scale_src => try emit.mirArithScaleSrc(.mov, inst),
-
- .adc_scale_dst => try emit.mirArithScaleDst(.adc, inst),
- .add_scale_dst => try emit.mirArithScaleDst(.add, inst),
- .sub_scale_dst => try emit.mirArithScaleDst(.sub, inst),
- .xor_scale_dst => try emit.mirArithScaleDst(.xor, inst),
- .and_scale_dst => try emit.mirArithScaleDst(.@"and", inst),
- .or_scale_dst => try emit.mirArithScaleDst(.@"or", inst),
- .sbb_scale_dst => try emit.mirArithScaleDst(.sbb, inst),
- .cmp_scale_dst => try emit.mirArithScaleDst(.cmp, inst),
- .mov_scale_dst => try emit.mirArithScaleDst(.mov, inst),
-
- .adc_scale_imm => try emit.mirArithScaleImm(.adc, inst),
- .add_scale_imm => try emit.mirArithScaleImm(.add, inst),
- .sub_scale_imm => try emit.mirArithScaleImm(.sub, inst),
- .xor_scale_imm => try emit.mirArithScaleImm(.xor, inst),
- .and_scale_imm => try emit.mirArithScaleImm(.@"and", inst),
- .or_scale_imm => try emit.mirArithScaleImm(.@"or", inst),
- .sbb_scale_imm => try emit.mirArithScaleImm(.sbb, inst),
- .cmp_scale_imm => try emit.mirArithScaleImm(.cmp, inst),
- .mov_scale_imm => try emit.mirArithScaleImm(.mov, inst),
-
- .adc_mem_index_imm => try emit.mirArithMemIndexImm(.adc, inst),
- .add_mem_index_imm => try emit.mirArithMemIndexImm(.add, inst),
- .sub_mem_index_imm => try emit.mirArithMemIndexImm(.sub, inst),
- .xor_mem_index_imm => try emit.mirArithMemIndexImm(.xor, inst),
- .and_mem_index_imm => try emit.mirArithMemIndexImm(.@"and", inst),
- .or_mem_index_imm => try emit.mirArithMemIndexImm(.@"or", inst),
- .sbb_mem_index_imm => try emit.mirArithMemIndexImm(.sbb, inst),
- .cmp_mem_index_imm => try emit.mirArithMemIndexImm(.cmp, inst),
- .mov_mem_index_imm => try emit.mirArithMemIndexImm(.mov, inst),
-
- .mov_sign_extend => try emit.mirMovSignExtend(inst),
- .mov_zero_extend => try emit.mirMovZeroExtend(inst),
-
- .movabs => try emit.mirMovabs(inst),
-
- .fisttp => try emit.mirFisttp(inst),
- .fld => try emit.mirFld(inst),
-
- .lea => try emit.mirLea(inst),
- .lea_pic => try emit.mirLeaPic(inst),
-
- .shl => try emit.mirShift(.shl, inst),
- .sal => try emit.mirShift(.sal, inst),
- .shr => try emit.mirShift(.shr, inst),
- .sar => try emit.mirShift(.sar, inst),
-
- .imul => try emit.mirMulDiv(.imul, inst),
- .mul => try emit.mirMulDiv(.mul, inst),
- .idiv => try emit.mirMulDiv(.idiv, inst),
- .div => try emit.mirMulDiv(.div, inst),
- .imul_complex => try emit.mirIMulComplex(inst),
-
- .cwd => try emit.mirCwd(inst),
-
- .push => try emit.mirPushPop(.push, inst),
- .pop => try emit.mirPushPop(.pop, inst),
-
- .jmp => try emit.mirJmpCall(.jmp, inst),
- .call => try emit.mirJmpCall(.call, inst),
-
- .cond_jmp => try emit.mirCondJmp(inst),
- .cond_set_byte => try emit.mirCondSetByte(inst),
- .cond_mov => try emit.mirCondMov(inst),
-
- .ret => try emit.mirRet(inst),
-
- .syscall => try emit.mirSyscall(),
-
- .@"test" => try emit.mirTest(inst),
-
- .ud => try emit.mirUndefinedInstruction(),
- .interrupt => try emit.mirInterrupt(inst),
- .nop => {}, // just skip it
-
- // SSE/AVX instructions
- .mov_f64 => try emit.mirMovFloat(.movsd, inst),
- .mov_f32 => try emit.mirMovFloat(.movss, inst),
-
- .add_f64 => try emit.mirAddFloat(.addsd, inst),
- .add_f32 => try emit.mirAddFloat(.addss, inst),
-
- .cmp_f64 => try emit.mirCmpFloat(.ucomisd, inst),
- .cmp_f32 => try emit.mirCmpFloat(.ucomiss, inst),
+ .adc,
+ .add,
+ .@"and",
+ .cbw,
+ .cwde,
+ .cdqe,
+ .cwd,
+ .cdq,
+ .cqo,
+ .cmp,
+ .div,
+ .fisttp,
+ .fld,
+ .idiv,
+ .imul,
+ .int3,
+ .mov,
+ .movsx,
+ .movzx,
+ .mul,
+ .nop,
+ .@"or",
+ .pop,
+ .push,
+ .ret,
+ .sal,
+ .sar,
+ .sbb,
+ .shl,
+ .shr,
+ .sub,
+ .syscall,
+ .@"test",
+ .ud2,
+ .xor,
+
+ .addss,
+ .cmpss,
+ .movss,
+ .ucomiss,
+ .addsd,
+ .cmpsd,
+ .movsd,
+ .ucomisd,
+ => try emit.mirEncodeGeneric(tag, inst),
// Pseudo-instructions
- .call_extern => try emit.mirCallExtern(inst),
-
.dbg_line => try emit.mirDbgLine(inst),
.dbg_prologue_end => try emit.mirDbgPrologueEnd(inst),
.dbg_epilogue_begin => try emit.mirDbgEpilogueBegin(inst),
@@ -196,9 +125,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.push_regs => try emit.mirPushPopRegisterList(.push, inst),
.pop_regs => try emit.mirPushPopRegisterList(.pop, inst),
- else => {
- return emit.fail("Implement MIR->Emit lowering for x86_64 for pseudo-inst: {}", .{tag});
- },
+ else => return emit.fail("Implement MIR->Emit lowering for x86_64 for pseudo-inst: {}", .{tag}),
}
}
@@ -246,66 +173,57 @@ fn encode(emit: *Emit, mnemonic: Instruction.Mnemonic, ops: struct {
return inst.encode(emit.code.writer());
}
-fn mirUndefinedInstruction(emit: *Emit) InnerError!void {
- return emit.encode(.ud2, .{});
-}
-
-fn mirInterrupt(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .interrupt);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => return emit.encode(.int3, .{}),
- else => return emit.fail("TODO handle variant 0b{b} of interrupt instruction", .{ops.flags}),
- }
-}
-
-fn mirSyscall(emit: *Emit) InnerError!void {
- return emit.encode(.syscall, .{});
-}
+fn mirEncodeGeneric(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
+ const mnemonic = inline for (@typeInfo(Instruction.Mnemonic).Enum.fields) |field| {
+ if (mem.eql(u8, field.name, @tagName(tag))) break @field(Instruction.Mnemonic, field.name);
+ } else unreachable;
-fn mirPushPop(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- });
+ var operands = [4]Instruction.Operand{ .none, .none, .none, .none };
+ const ops = emit.mir.instructions.items(.ops)[inst];
+ const data = emit.mir.instructions.items(.data)[inst];
+ switch (ops) {
+ .none => {},
+ .imm_s => operands[0] = .{ .imm = Immediate.s(data.imm_s) },
+ .imm_u => operands[0] = .{ .imm = Immediate.u(data.imm_u) },
+ .r => operands[0] = .{ .reg = data.r },
+ .rr => operands[0..2].* = .{
+ .{ .reg = data.rr.r1 },
+ .{ .reg = data.rr.r2 },
},
- 0b01 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(.qword, .{
- .base = ops.reg1,
- .disp = disp,
- }) },
- });
+ .ri_s => operands[0..2].* = .{
+ .{ .reg = data.ri_s.r1 },
+ .{ .imm = Immediate.s(data.ri_s.imm) },
},
- 0b10 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.push, .{
- .op1 = .{ .imm = Immediate.u(imm) },
- });
+ .ri_u => operands[0..2].* = .{
+ .{ .reg = data.ri_u.r1 },
+ .{ .imm = Immediate.u(data.ri_u.imm) },
},
- 0b11 => unreachable,
+ else => unreachable,
}
+
+ return emit.encode(mnemonic, .{
+ .op1 = operands[0],
+ .op2 = operands[1],
+ .op3 = operands[2],
+ .op4 = operands[3],
+ });
}
-fn mirPushPopRegisterList(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
+fn mirPushPopRegisterList(emit: *Emit, tag: Mir.Inst.Tag, inst: Mir.Inst.Index) InnerError!void {
const payload = emit.mir.instructions.items(.data)[inst].payload;
const save_reg_list = emit.mir.extraData(Mir.SaveRegisterList, payload).data;
+ const base = @intToEnum(Register, save_reg_list.base_reg);
var disp: i32 = -@intCast(i32, save_reg_list.stack_end);
const reg_list = Mir.RegisterList.fromInt(save_reg_list.register_list);
const callee_preserved_regs = abi.getCalleePreservedRegs(emit.target.*);
for (callee_preserved_regs) |reg| {
if (reg_list.isSet(callee_preserved_regs, reg)) {
const op1: Instruction.Operand = .{ .mem = Memory.sib(.qword, .{
- .base = ops.reg1,
+ .base = base,
.disp = disp,
}) };
const op2: Instruction.Operand = .{ .reg = reg };
- switch (mnemonic) {
+ switch (tag) {
.push => try emit.encode(.mov, .{
.op1 = op1,
.op2 = op2,
@@ -321,858 +239,345 @@ fn mirPushPopRegisterList(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir
}
}
-fn mirJmpCall(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const target = emit.mir.instructions.items(.data)[inst].inst;
- const source = emit.code.items.len;
- try emit.encode(mnemonic, .{
- .op1 = .{ .imm = Immediate.s(0) },
- });
- try emit.relocs.append(emit.bin_file.allocator, .{
- .source = source,
- .target = target,
- .offset = emit.code.items.len - 4,
- .length = 5,
- });
- },
- 0b01 => {
- if (ops.reg1 == .none) {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(.qword, .{ .disp = disp }) },
- });
- }
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- });
- },
- 0b10 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(.qword, .{
- .base = ops.reg1,
- .disp = disp,
- }) },
- });
- },
- 0b11 => return emit.fail("TODO unused variant jmp/call 0b11", .{}),
- }
-}
-
-fn mirCondJmp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .cond_jmp);
- const inst_cc = emit.mir.instructions.items(.data)[inst].inst_cc;
- const mnemonic: Instruction.Mnemonic = switch (inst_cc.cc) {
- .a => .ja,
- .ae => .jae,
- .b => .jb,
- .be => .jbe,
- .c => .jc,
- .e => .je,
- .g => .jg,
- .ge => .jge,
- .l => .jl,
- .le => .jle,
- .na => .jna,
- .nae => .jnae,
- .nb => .jnb,
- .nbe => .jnbe,
- .nc => .jnc,
- .ne => .jne,
- .ng => .jng,
- .nge => .jnge,
- .nl => .jnl,
- .nle => .jnle,
- .no => .jno,
- .np => .jnp,
- .ns => .jns,
- .nz => .jnz,
- .o => .jo,
- .p => .jp,
- .pe => .jpe,
- .po => .jpo,
- .s => .js,
- .z => .jz,
- };
- const source = emit.code.items.len;
- try emit.encode(mnemonic, .{
- .op1 = .{ .imm = Immediate.s(0) },
- });
- try emit.relocs.append(emit.bin_file.allocator, .{
- .source = source,
- .target = inst_cc.inst,
- .offset = emit.code.items.len - 4,
- .length = 6,
- });
-}
-
-fn mirCondSetByte(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .cond_set_byte);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const cc = emit.mir.instructions.items(.data)[inst].cc;
- const mnemonic: Instruction.Mnemonic = switch (cc) {
- .a => .seta,
- .ae => .setae,
- .b => .setb,
- .be => .setbe,
- .c => .setc,
- .e => .sete,
- .g => .setg,
- .ge => .setge,
- .l => .setl,
- .le => .setle,
- .na => .setna,
- .nae => .setnae,
- .nb => .setnb,
- .nbe => .setnbe,
- .nc => .setnc,
- .ne => .setne,
- .ng => .setng,
- .nge => .setnge,
- .nl => .setnl,
- .nle => .setnle,
- .no => .setno,
- .np => .setnp,
- .ns => .setns,
- .nz => .setnz,
- .o => .seto,
- .p => .setp,
- .pe => .setpe,
- .po => .setpo,
- .s => .sets,
- .z => .setz,
- };
- return emit.encode(mnemonic, .{ .op1 = .{ .reg = ops.reg1 } });
-}
-
-fn mirCondMov(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .cond_mov);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const cc = emit.mir.instructions.items(.data)[inst].cc;
- const mnemonic: Instruction.Mnemonic = switch (cc) {
- .a => .cmova,
- .ae => .cmovae,
- .b => .cmovb,
- .be => .cmovbe,
- .c => .cmovc,
- .e => .cmove,
- .g => .cmovg,
- .ge => .cmovge,
- .l => .cmovl,
- .le => .cmovle,
- .na => .cmovna,
- .nae => .cmovnae,
- .nb => .cmovnb,
- .nbe => .cmovnbe,
- .nc => .cmovnc,
- .ne => .cmovne,
- .ng => .cmovng,
- .nge => .cmovnge,
- .nl => .cmovnl,
- .nle => .cmovnle,
- .no => .cmovno,
- .np => .cmovnp,
- .ns => .cmovns,
- .nz => .cmovnz,
- .o => .cmovo,
- .p => .cmovp,
- .pe => .cmovpe,
- .po => .cmovpo,
- .s => .cmovs,
- .z => .cmovz,
- };
- const op1: Instruction.Operand = .{ .reg = ops.reg1 };
-
- if (ops.flags == 0b00) {
- return emit.encode(mnemonic, .{
- .op1 = op1,
- .op2 = .{ .reg = ops.reg2 },
- });
- }
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => unreachable,
- 0b01 => .word,
- 0b10 => .dword,
- 0b11 => .qword,
- };
- return emit.encode(mnemonic, .{
- .op1 = op1,
- .op2 = .{ .mem = Memory.sib(ptr_size, .{
- .base = ops.reg2,
- .disp = disp,
- }) },
- });
-}
-
-fn mirTest(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .@"test");
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- if (ops.reg2 == .none) {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.@"test", .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .imm = Immediate.u(imm) },
- });
- }
- return emit.encode(.@"test", .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- else => return emit.fail("TODO more TEST alternatives", .{}),
- }
-}
-
-fn mirRet(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .ret);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => unreachable,
- 0b01 => unreachable,
- 0b10 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.ret, .{
- .op1 = .{ .imm = Immediate.u(imm) },
- });
- },
- 0b11 => {
- return emit.encode(.ret, .{});
- },
- }
-}
-
-fn mirArith(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- if (ops.reg2 == .none) {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .imm = Immediate.u(imm) },
- });
- }
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- 0b01 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- const base: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
- .base = base,
- .disp = disp,
- }) },
- });
- },
- 0b10 => {
- if (ops.reg2 == .none) {
- return emit.fail("TODO unused variant: mov reg1, none, 0b10", .{});
- }
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg2.bitSize()), .{
- .base = ops.reg1,
- .disp = disp,
- }) },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- 0b11 => {
- const imm_s = emit.mir.instructions.items(.data)[inst].imm_s;
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .imm = Immediate.s(imm_s) },
- });
- },
- }
-}
-
-fn mirArithMemImm(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- assert(ops.reg2 == .none);
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .byte,
- 0b01 => .word,
- 0b10 => .dword,
- 0b11 => .qword,
- };
- const imm = switch (ops.flags) {
- 0b00 => @truncate(u8, imm_pair.operand),
- 0b01 => @truncate(u16, imm_pair.operand),
- 0b10, 0b11 => @truncate(u32, imm_pair.operand),
- };
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(ptr_size, .{
- .disp = imm_pair.dest_off,
- .base = ops.reg1,
- }) },
- .op2 = .{ .imm = Immediate.u(imm) },
- });
-}
-
-fn mirArithScaleSrc(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const scale = ops.flags;
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
- const scale_index = Memory.ScaleIndex{
- .scale = @as(u4, 1) << scale,
- .index = index_reg_disp.index,
- };
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
- .base = ops.reg2,
- .scale_index = scale_index,
- .disp = index_reg_disp.disp,
- }) },
- });
-}
-
-fn mirArithScaleDst(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const scale = ops.flags;
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
- const scale_index = Memory.ScaleIndex{
- .scale = @as(u4, 1) << scale,
- .index = index_reg_disp.index,
- };
- assert(ops.reg2 != .none);
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg2.bitSize()), .{
- .base = ops.reg1,
- .scale_index = scale_index,
- .disp = index_reg_disp.disp,
- }) },
- .op2 = .{ .reg = ops.reg2 },
- });
-}
-
-fn mirArithScaleImm(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const scale = ops.flags;
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
- const scale_index = Memory.ScaleIndex{
- .scale = @as(u4, 1) << scale,
- .index = index_reg_disp_imm.index,
- };
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(.qword, .{
- .base = ops.reg1,
- .disp = index_reg_disp_imm.disp,
- .scale_index = scale_index,
- }) },
- .op2 = .{ .imm = Immediate.u(index_reg_disp_imm.imm) },
- });
-}
-
-fn mirArithMemIndexImm(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- assert(ops.reg2 == .none);
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp_imm = emit.mir.extraData(Mir.IndexRegisterDispImm, payload).data.decode();
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .byte,
- 0b01 => .word,
- 0b10 => .dword,
- 0b11 => .qword,
- };
- const scale_index = Memory.ScaleIndex{
- .scale = 1,
- .index = index_reg_disp_imm.index,
- };
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(ptr_size, .{
- .disp = index_reg_disp_imm.disp,
- .base = ops.reg1,
- .scale_index = scale_index,
- }) },
- .op2 = .{ .imm = Immediate.u(index_reg_disp_imm.imm) },
- });
-}
-
-fn mirMovSignExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .mov_sign_extend);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const disp = if (ops.flags != 0b00) emit.mir.instructions.items(.data)[inst].disp else undefined;
- switch (ops.flags) {
- 0b00 => {
- const mnemonic: Instruction.Mnemonic = if (ops.reg2.bitSize() == 32) .movsxd else .movsx;
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- else => {
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b01 => .byte,
- 0b10 => .word,
- 0b11 => .dword,
- else => unreachable,
- };
- const mnemonic: Instruction.Mnemonic = if (ops.flags == 0b11) .movsxd else .movsx;
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(ptr_size, .{
- .base = ops.reg2,
- .disp = disp,
- }) },
- });
- },
- }
-}
-
-fn mirMovZeroExtend(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .mov_zero_extend);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const disp = if (ops.flags != 0b00) emit.mir.instructions.items(.data)[inst].disp else undefined;
- switch (ops.flags) {
- 0b00 => {
- return emit.encode(.movzx, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- 0b01, 0b10 => {
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b01 => .byte,
- 0b10 => .word,
- else => unreachable,
- };
- return emit.encode(.movzx, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(ptr_size, .{
- .disp = disp,
- .base = ops.reg2,
- }) },
- });
- },
- 0b11 => {
- return emit.fail("TODO unused variant: movzx 0b11", .{});
- },
- }
-}
-
-fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .movabs);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const imm: u64 = if (ops.reg1.bitSize() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.mov, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .imm = Immediate.u(imm) },
- });
- },
- 0b01 => {
- if (ops.reg1 == .none) {
- const imm: u64 = if (ops.reg2.bitSize() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.mov, .{
- .op1 = .{ .mem = Memory.moffs(ops.reg2, imm) },
- .op2 = .{ .reg = .rax },
- });
- }
- const imm: u64 = if (ops.reg1.bitSize() == 64) blk: {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data;
- break :blk imm.decode();
- } else emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.mov, .{
- .op1 = .{ .reg = .rax },
- .op2 = .{ .mem = Memory.moffs(ops.reg1, imm) },
- });
- },
- else => return emit.fail("TODO unused movabs variant", .{}),
- }
-}
-
-fn mirFisttp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .fisttp);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .word,
- 0b01 => .dword,
- 0b10 => .qword,
- else => unreachable,
- };
- return emit.encode(.fisttp, .{
- .op1 = .{ .mem = Memory.sib(ptr_size, .{
- .base = ops.reg1,
- .disp = emit.mir.instructions.items(.data)[inst].disp,
- }) },
- });
-}
-
-fn mirFld(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .fld);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b01 => .dword,
- 0b10 => .qword,
- else => unreachable,
- };
- return emit.encode(.fld, .{
- .op1 = .{ .mem = Memory.sib(ptr_size, .{
- .base = ops.reg1,
- .disp = emit.mir.instructions.items(.data)[inst].disp,
- }) },
- });
-}
-
-fn mirShift(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .imm = Immediate.u(1) },
- });
- },
- 0b01 => {
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = .cl },
- });
- },
- 0b10 => {
- const imm = @truncate(u8, emit.mir.instructions.items(.data)[inst].imm);
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .imm = Immediate.u(imm) },
- });
- },
- 0b11 => {
- return emit.fail("TODO unused variant: SHIFT reg1, 0b11", .{});
- },
- }
-}
-
-fn mirMulDiv(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- if (ops.reg1 != .none) {
- assert(ops.reg2 == .none);
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- });
- }
- assert(ops.reg2 != .none);
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- const ptr_size: Memory.PtrSize = switch (ops.flags) {
- 0b00 => .byte,
- 0b01 => .word,
- 0b10 => .dword,
- 0b11 => .qword,
- };
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(ptr_size, .{
- .base = ops.reg2,
- .disp = disp,
- }) },
- });
-}
-
-fn mirIMulComplex(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .imul_complex);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return emit.encode(.imul, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- 0b01 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- return emit.encode(.imul, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(.qword, .{
- .base = src_reg,
- .disp = disp,
- }) },
- });
- },
- 0b10 => {
- const imm = emit.mir.instructions.items(.data)[inst].imm;
- return emit.encode(.imul, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- .op3 = .{ .imm = Immediate.u(imm) },
- });
- },
- 0b11 => {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm_pair = emit.mir.extraData(Mir.ImmPair, payload).data;
- return emit.encode(.imul, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(.qword, .{
- .base = ops.reg2,
- .disp = imm_pair.dest_off,
- }) },
- .op3 = .{ .imm = Immediate.u(imm_pair.operand) },
- });
- },
- }
-}
-
-fn mirCwd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const mnemonic: Instruction.Mnemonic = switch (ops.flags) {
- 0b00 => .cbw,
- 0b01 => .cwd,
- 0b10 => .cdq,
- 0b11 => .cqo,
- };
- return emit.encode(mnemonic, .{});
-}
-
-fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .lea);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- return emit.encode(.lea, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
- .base = src_reg,
- .disp = disp,
- }) },
- });
- },
- 0b01 => {
- const start_offset = emit.code.items.len;
- try emit.encode(.lea, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), 0) },
- });
- const end_offset = emit.code.items.len;
- // Backpatch the displacement
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const imm = emit.mir.extraData(Mir.Imm64, payload).data.decode();
- const disp = @intCast(i32, @intCast(i64, imm) - @intCast(i64, end_offset - start_offset));
- mem.writeIntLittle(i32, emit.code.items[end_offset - 4 ..][0..4], disp);
- },
- 0b10 => {
- const payload = emit.mir.instructions.items(.data)[inst].payload;
- const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
- const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
- const scale_index = Memory.ScaleIndex{
- .scale = 1,
- .index = index_reg_disp.index,
- };
- return emit.encode(.lea, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
- .base = src_reg,
- .scale_index = scale_index,
- .disp = index_reg_disp.disp,
- }) },
- });
- },
- 0b11 => return emit.fail("TODO unused LEA variant 0b11", .{}),
- }
-}
-
-fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .lea_pic);
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- const relocation = emit.mir.instructions.items(.data)[inst].relocation;
-
- switch (ops.flags) {
- 0b00, 0b01, 0b10 => {},
- else => return emit.fail("TODO unused LEA PIC variant 0b11", .{}),
- }
-
- try emit.encode(.lea, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), 0) },
- });
-
- const end_offset = emit.code.items.len;
-
- if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const reloc_type = switch (ops.flags) {
- 0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
- 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
- else => unreachable,
- };
- const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
- .type = reloc_type,
- .target = .{ .sym_index = relocation.sym_index, .file = null },
- .offset = @intCast(u32, end_offset - 4),
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
- .type = switch (ops.flags) {
- 0b00 => .got,
- 0b01 => .direct,
- 0b10 => .import,
- else => unreachable,
- },
- .target = switch (ops.flags) {
- 0b00, 0b01 => .{ .sym_index = relocation.sym_index, .file = null },
- 0b10 => coff_file.getGlobalByIndex(relocation.sym_index),
- else => unreachable,
- },
- .offset = @intCast(u32, end_offset - 4),
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- } else {
- return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
- }
-}
-
-// SSE/AVX instructions
-
-fn mirMovFloat(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg2.bitSize()), .{
- .base = ops.reg2,
- .disp = disp,
- }) },
- });
- },
- 0b01 => {
- const disp = emit.mir.instructions.items(.data)[inst].disp;
- return emit.encode(mnemonic, .{
- .op1 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
- .base = ops.reg1,
- .disp = disp,
- }) },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- 0b10 => {
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, mnemonic }),
- }
-}
-
-fn mirAddFloat(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, mnemonic }),
- }
-}
-
-fn mirCmpFloat(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
- const ops = emit.mir.instructions.items(.ops)[inst].decode();
- switch (ops.flags) {
- 0b00 => {
- return emit.encode(mnemonic, .{
- .op1 = .{ .reg = ops.reg1 },
- .op2 = .{ .reg = ops.reg2 },
- });
- },
- else => return emit.fail("TODO unused variant 0b{b} for {}", .{ ops.flags, mnemonic }),
- }
-}
-
-// Pseudo-instructions
-
-fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .call_extern);
- const relocation = emit.mir.instructions.items(.data)[inst].relocation;
-
- const offset = blk: {
- // callq
- try emit.encode(.call, .{
- .op1 = .{ .imm = Immediate.s(0) },
- });
- break :blk @intCast(u32, emit.code.items.len) - 4;
- };
-
- if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- // Add relocation to the decl.
- const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- const target = macho_file.getGlobalByIndex(relocation.sym_index);
- try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
- .type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
- .target = target,
- .offset = offset,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
- // Add relocation to the decl.
- const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
- const target = coff_file.getGlobalByIndex(relocation.sym_index);
- try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
- .type = .direct,
- .target = target,
- .offset = offset,
- .addend = 0,
- .pcrel = true,
- .length = 2,
- });
- } else {
- return emit.fail("TODO implement call_extern for linking backends different than MachO and COFF", .{});
- }
-}
+// fn mirJmpCall(emit: *Emit, mnemonic: Instruction.Mnemonic, inst: Mir.Inst.Index) InnerError!void {
+// const ops = emit.mir.instructions.items(.ops)[inst].decode();
+// switch (ops.flags) {
+// 0b00 => {
+// const target = emit.mir.instructions.items(.data)[inst].inst;
+// const source = emit.code.items.len;
+// try emit.encode(mnemonic, .{
+// .op1 = .{ .imm = Immediate.s(0) },
+// });
+// try emit.relocs.append(emit.bin_file.allocator, .{
+// .source = source,
+// .target = target,
+// .offset = emit.code.items.len - 4,
+// .length = 5,
+// });
+// },
+// 0b01 => {
+// if (ops.reg1 == .none) {
+// const disp = emit.mir.instructions.items(.data)[inst].disp;
+// return emit.encode(mnemonic, .{
+// .op1 = .{ .mem = Memory.sib(.qword, .{ .disp = disp }) },
+// });
+// }
+// return emit.encode(mnemonic, .{
+// .op1 = .{ .reg = ops.reg1 },
+// });
+// },
+// 0b10 => {
+// const disp = emit.mir.instructions.items(.data)[inst].disp;
+// return emit.encode(mnemonic, .{
+// .op1 = .{ .mem = Memory.sib(.qword, .{
+// .base = ops.reg1,
+// .disp = disp,
+// }) },
+// });
+// },
+// 0b11 => return emit.fail("TODO unused variant jmp/call 0b11", .{}),
+// }
+// }
+
+// fn mirCondJmp(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+// const tag = emit.mir.instructions.items(.tag)[inst];
+// assert(tag == .cond_jmp);
+// const inst_cc = emit.mir.instructions.items(.data)[inst].inst_cc;
+// const mnemonic: Instruction.Mnemonic = switch (inst_cc.cc) {
+// .a => .ja,
+// .ae => .jae,
+// .b => .jb,
+// .be => .jbe,
+// .c => .jc,
+// .e => .je,
+// .g => .jg,
+// .ge => .jge,
+// .l => .jl,
+// .le => .jle,
+// .na => .jna,
+// .nae => .jnae,
+// .nb => .jnb,
+// .nbe => .jnbe,
+// .nc => .jnc,
+// .ne => .jne,
+// .ng => .jng,
+// .nge => .jnge,
+// .nl => .jnl,
+// .nle => .jnle,
+// .no => .jno,
+// .np => .jnp,
+// .ns => .jns,
+// .nz => .jnz,
+// .o => .jo,
+// .p => .jp,
+// .pe => .jpe,
+// .po => .jpo,
+// .s => .js,
+// .z => .jz,
+// };
+// const source = emit.code.items.len;
+// try emit.encode(mnemonic, .{
+// .op1 = .{ .imm = Immediate.s(0) },
+// });
+// try emit.relocs.append(emit.bin_file.allocator, .{
+// .source = source,
+// .target = inst_cc.inst,
+// .offset = emit.code.items.len - 4,
+// .length = 6,
+// });
+// }
+
+// fn mirCondSetByte(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+// const tag = emit.mir.instructions.items(.tag)[inst];
+// assert(tag == .cond_set_byte);
+// const ops = emit.mir.instructions.items(.ops)[inst].decode();
+// const cc = emit.mir.instructions.items(.data)[inst].cc;
+// const mnemonic: Instruction.Mnemonic = switch (cc) {
+// .a => .seta,
+// .ae => .setae,
+// .b => .setb,
+// .be => .setbe,
+// .c => .setc,
+// .e => .sete,
+// .g => .setg,
+// .ge => .setge,
+// .l => .setl,
+// .le => .setle,
+// .na => .setna,
+// .nae => .setnae,
+// .nb => .setnb,
+// .nbe => .setnbe,
+// .nc => .setnc,
+// .ne => .setne,
+// .ng => .setng,
+// .nge => .setnge,
+// .nl => .setnl,
+// .nle => .setnle,
+// .no => .setno,
+// .np => .setnp,
+// .ns => .setns,
+// .nz => .setnz,
+// .o => .seto,
+// .p => .setp,
+// .pe => .setpe,
+// .po => .setpo,
+// .s => .sets,
+// .z => .setz,
+// };
+// return emit.encode(mnemonic, .{ .op1 = .{ .reg = ops.reg1 } });
+// }
+
+// fn mirCondMov(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+// const tag = emit.mir.instructions.items(.tag)[inst];
+// assert(tag == .cond_mov);
+// const ops = emit.mir.instructions.items(.ops)[inst].decode();
+// const cc = emit.mir.instructions.items(.data)[inst].cc;
+// const mnemonic: Instruction.Mnemonic = switch (cc) {
+// .a => .cmova,
+// .ae => .cmovae,
+// .b => .cmovb,
+// .be => .cmovbe,
+// .c => .cmovc,
+// .e => .cmove,
+// .g => .cmovg,
+// .ge => .cmovge,
+// .l => .cmovl,
+// .le => .cmovle,
+// .na => .cmovna,
+// .nae => .cmovnae,
+// .nb => .cmovnb,
+// .nbe => .cmovnbe,
+// .nc => .cmovnc,
+// .ne => .cmovne,
+// .ng => .cmovng,
+// .nge => .cmovnge,
+// .nl => .cmovnl,
+// .nle => .cmovnle,
+// .no => .cmovno,
+// .np => .cmovnp,
+// .ns => .cmovns,
+// .nz => .cmovnz,
+// .o => .cmovo,
+// .p => .cmovp,
+// .pe => .cmovpe,
+// .po => .cmovpo,
+// .s => .cmovs,
+// .z => .cmovz,
+// };
+// const op1: Instruction.Operand = .{ .reg = ops.reg1 };
+
+// if (ops.flags == 0b00) {
+// return emit.encode(mnemonic, .{
+// .op1 = op1,
+// .op2 = .{ .reg = ops.reg2 },
+// });
+// }
+// const disp = emit.mir.instructions.items(.data)[inst].disp;
+// const ptr_size: Memory.PtrSize = switch (ops.flags) {
+// 0b00 => unreachable,
+// 0b01 => .word,
+// 0b10 => .dword,
+// 0b11 => .qword,
+// };
+// return emit.encode(mnemonic, .{
+// .op1 = op1,
+// .op2 = .{ .mem = Memory.sib(ptr_size, .{
+// .base = ops.reg2,
+// .disp = disp,
+// }) },
+// });
+// }
+
+// fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+// const tag = emit.mir.instructions.items(.tag)[inst];
+// assert(tag == .lea);
+// const ops = emit.mir.instructions.items(.ops)[inst].decode();
+// switch (ops.flags) {
+// 0b00 => {
+// const disp = emit.mir.instructions.items(.data)[inst].disp;
+// const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
+// return emit.encode(.lea, .{
+// .op1 = .{ .reg = ops.reg1 },
+// .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
+// .base = src_reg,
+// .disp = disp,
+// }) },
+// });
+// },
+// 0b01 => {
+// const start_offset = emit.code.items.len;
+// try emit.encode(.lea, .{
+// .op1 = .{ .reg = ops.reg1 },
+// .op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), 0) },
+// });
+// const end_offset = emit.code.items.len;
+// // Backpatch the displacement
+// const payload = emit.mir.instructions.items(.data)[inst].payload;
+// const imm = emit.mir.extraData(Mir.Imm64, payload).data.decode();
+// const disp = @intCast(i32, @intCast(i64, imm) - @intCast(i64, end_offset - start_offset));
+// mem.writeIntLittle(i32, emit.code.items[end_offset - 4 ..][0..4], disp);
+// },
+// 0b10 => {
+// const payload = emit.mir.instructions.items(.data)[inst].payload;
+// const index_reg_disp = emit.mir.extraData(Mir.IndexRegisterDisp, payload).data.decode();
+// const src_reg: ?Register = if (ops.reg2 != .none) ops.reg2 else null;
+// const scale_index = Memory.ScaleIndex{
+// .scale = 1,
+// .index = index_reg_disp.index,
+// };
+// return emit.encode(.lea, .{
+// .op1 = .{ .reg = ops.reg1 },
+// .op2 = .{ .mem = Memory.sib(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), .{
+// .base = src_reg,
+// .scale_index = scale_index,
+// .disp = index_reg_disp.disp,
+// }) },
+// });
+// },
+// 0b11 => return emit.fail("TODO unused LEA variant 0b11", .{}),
+// }
+// }
+
+// fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+// const tag = emit.mir.instructions.items(.tag)[inst];
+// assert(tag == .lea_pic);
+// const ops = emit.mir.instructions.items(.ops)[inst].decode();
+// const relocation = emit.mir.instructions.items(.data)[inst].relocation;
+
+// switch (ops.flags) {
+// 0b00, 0b01, 0b10 => {},
+// else => return emit.fail("TODO unused LEA PIC variant 0b11", .{}),
+// }
+
+// try emit.encode(.lea, .{
+// .op1 = .{ .reg = ops.reg1 },
+// .op2 = .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(ops.reg1.bitSize()), 0) },
+// });
+
+// const end_offset = emit.code.items.len;
+
+// if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
+// const reloc_type = switch (ops.flags) {
+// 0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
+// 0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
+// else => unreachable,
+// };
+// const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+// try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
+// .type = reloc_type,
+// .target = .{ .sym_index = relocation.sym_index, .file = null },
+// .offset = @intCast(u32, end_offset - 4),
+// .addend = 0,
+// .pcrel = true,
+// .length = 2,
+// });
+// } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
+// const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+// try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
+// .type = switch (ops.flags) {
+// 0b00 => .got,
+// 0b01 => .direct,
+// 0b10 => .import,
+// else => unreachable,
+// },
+// .target = switch (ops.flags) {
+// 0b00, 0b01 => .{ .sym_index = relocation.sym_index, .file = null },
+// 0b10 => coff_file.getGlobalByIndex(relocation.sym_index),
+// else => unreachable,
+// },
+// .offset = @intCast(u32, end_offset - 4),
+// .addend = 0,
+// .pcrel = true,
+// .length = 2,
+// });
+// } else {
+// return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
+// }
+// }
+
+// fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
+// const tag = emit.mir.instructions.items(.tag)[inst];
+// assert(tag == .call_extern);
+// const relocation = emit.mir.instructions.items(.data)[inst].relocation;
+
+// const offset = blk: {
+// // callq
+// try emit.encode(.call, .{
+// .op1 = .{ .imm = Immediate.s(0) },
+// });
+// break :blk @intCast(u32, emit.code.items.len) - 4;
+// };
+
+// if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
+// // Add relocation to the decl.
+// const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+// const target = macho_file.getGlobalByIndex(relocation.sym_index);
+// try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
+// .type = @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_BRANCH),
+// .target = target,
+// .offset = offset,
+// .addend = 0,
+// .pcrel = true,
+// .length = 2,
+// });
+// } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
+// // Add relocation to the decl.
+// const atom_index = coff_file.getAtomIndexForSymbol(.{ .sym_index = relocation.atom_index, .file = null }).?;
+// const target = coff_file.getGlobalByIndex(relocation.sym_index);
+// try link.File.Coff.Atom.addRelocation(coff_file, atom_index, .{
+// .type = .direct,
+// .target = target,
+// .offset = offset,
+// .addend = 0,
+// .pcrel = true,
+// .length = 2,
+// });
+// } else {
+// return emit.fail("TODO implement call_extern for linking backends different than MachO and COFF", .{});
+// }
+// }
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .dbg_line);
const payload = emit.mir.instructions.items(.data)[inst].payload;
const dbg_line_column = emit.mir.extraData(Mir.DbgLineColumn, payload).data;
log.debug("mirDbgLine", .{});
@@ -1230,8 +635,7 @@ fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) InnerError!void {
}
fn mirDbgPrologueEnd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .dbg_prologue_end);
+ _ = inst;
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
@@ -1247,8 +651,7 @@ fn mirDbgPrologueEnd(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
fn mirDbgEpilogueBegin(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
- const tag = emit.mir.instructions.items(.tag)[inst];
- assert(tag == .dbg_epilogue_begin);
+ _ = inst;
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
src/arch/x86_64/encoder.zig
@@ -4,10 +4,6 @@ const math = std.math;
const bits = @import("bits.zig");
const Encoding = @import("Encoding.zig");
-const Immediate = bits.Immediate;
-const Memory = bits.Memory;
-const Moffs = bits.Moffs;
-const PtrSize = bits.PtrSize;
const Register = bits.Register;
pub const Instruction = struct {
@@ -25,6 +21,9 @@ pub const Instruction = struct {
mem: Memory,
imm: Immediate,
+ pub const Memory = bits.Memory;
+ pub const Immediate = bits.Immediate;
+
/// Returns the bitsize of the operand.
pub fn bitSize(op: Operand) u64 {
return switch (op) {
@@ -296,7 +295,7 @@ pub const Instruction = struct {
try encoder.opcode_1byte(prefix);
}
- fn encodeMemory(encoding: Encoding, mem: Memory, operand: Operand, encoder: anytype) !void {
+ fn encodeMemory(encoding: Encoding, mem: Operand.Memory, operand: Operand, encoder: anytype) !void {
const operand_enc = switch (operand) {
.reg => |reg| reg.lowEnc(),
.none => encoding.modRmExt(),
@@ -379,7 +378,7 @@ pub const Instruction = struct {
}
}
- fn encodeImm(imm: Immediate, kind: Encoding.Op, encoder: anytype) !void {
+ fn encodeImm(imm: Operand.Immediate, kind: Encoding.Op, encoder: anytype) !void {
const raw = imm.asUnsigned(kind.bitSize());
switch (kind.bitSize()) {
8 => try encoder.imm8(@intCast(u8, raw)),
src/arch/x86_64/Mir.zig
@@ -12,6 +12,8 @@ const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
+const encoder = @import("encoder.zig");
+
const Air = @import("../../Air.zig");
const CodeGen = @import("CodeGen.zig");
const IntegerBitSet = std.bit_set.IntegerBitSet;
@@ -21,421 +23,239 @@ instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
+pub const Mnemonic = encoder.Instruction.Mnemonic;
+pub const Operand = encoder.Instruction.Operand;
+
pub const Inst = struct {
tag: Tag,
ops: Ops,
- /// The meaning of this depends on `tag` and `ops`.
data: Data,
- pub const Tag = enum(u16) {
- /// ops flags: form:
- /// 0b00 reg1, reg2
- /// 0b00 reg1, imm32
- /// 0b01 reg1, [reg2 + imm32]
- /// 0b01 reg1, [ds:imm32]
- /// 0b10 [reg1 + imm32], reg2
- /// 0b11 reg1, imm_s
- /// Notes:
- /// * If reg2 is `none` then it means Data field `imm` is used as the immediate.
- /// * When two imm32 values are required, Data field `payload` points at `ImmPair`.
- adc,
-
- /// ops flags: form:
- /// 0b00 byte ptr [reg1 + imm32], imm8
- /// 0b01 word ptr [reg1 + imm32], imm16
- /// 0b10 dword ptr [reg1 + imm32], imm32
- /// 0b11 qword ptr [reg1 + imm32], imm32 (sign-extended to imm64)
- /// Notes:
- /// * Uses `ImmPair` as payload
- adc_mem_imm,
-
- /// form: reg1, [reg2 + scale*index + imm32]
- /// ops flags scale
- /// 0b00 1
- /// 0b01 2
- /// 0b10 4
- /// 0b11 8
- /// Notes:
- /// * Uses `IndexRegisterDisp` as payload
- adc_scale_src,
-
- /// form: [reg1 + scale*index + imm32], reg2
- /// ops flags scale
- /// 0b00 1
- /// 0b01 2
- /// 0b10 4
- /// 0b11 8
- /// Notes:
- /// * Uses `IndexRegisterDisp` payload.
- adc_scale_dst,
-
- /// form: [reg1 + scale*rax + imm32], imm32
- /// ops flags scale
- /// 0b00 1
- /// 0b01 2
- /// 0b10 4
- /// 0b11 8
- /// Notes:
- /// * Uses `IndexRegisterDispImm` payload.
- adc_scale_imm,
-
- /// ops flags: form:
- /// 0b00 byte ptr [reg1 + index + imm32], imm8
- /// 0b01 word ptr [reg1 + index + imm32], imm16
- /// 0b10 dword ptr [reg1 + index + imm32], imm32
- /// 0b11 qword ptr [reg1 + index + imm32], imm32 (sign-extended to imm64)
- /// Notes:
- /// * Uses `IndexRegisterDispImm` payload.
- adc_mem_index_imm,
-
- // The following instructions all have the same encoding as `adc`.
+ pub const Index = u32;
+ pub const Tag = enum(u8) {
+ /// Add with carry
+ adc,
+ /// Add
add,
- add_mem_imm,
- add_scale_src,
- add_scale_dst,
- add_scale_imm,
- add_mem_index_imm,
- sub,
- sub_mem_imm,
- sub_scale_src,
- sub_scale_dst,
- sub_scale_imm,
- sub_mem_index_imm,
- xor,
- xor_mem_imm,
- xor_scale_src,
- xor_scale_dst,
- xor_scale_imm,
- xor_mem_index_imm,
+ /// Logical and
@"and",
- and_mem_imm,
- and_scale_src,
- and_scale_dst,
- and_scale_imm,
- and_mem_index_imm,
- @"or",
- or_mem_imm,
- or_scale_src,
- or_scale_dst,
- or_scale_imm,
- or_mem_index_imm,
- rol,
- rol_mem_imm,
- rol_scale_src,
- rol_scale_dst,
- rol_scale_imm,
- rol_mem_index_imm,
- ror,
- ror_mem_imm,
- ror_scale_src,
- ror_scale_dst,
- ror_scale_imm,
- ror_mem_index_imm,
- rcl,
- rcl_mem_imm,
- rcl_scale_src,
- rcl_scale_dst,
- rcl_scale_imm,
- rcl_mem_index_imm,
- rcr,
- rcr_mem_imm,
- rcr_scale_src,
- rcr_scale_dst,
- rcr_scale_imm,
- rcr_mem_index_imm,
- sbb,
- sbb_mem_imm,
- sbb_scale_src,
- sbb_scale_dst,
- sbb_scale_imm,
- sbb_mem_index_imm,
+ /// Call
+ call,
+ /// Convert byte to word
+ cbw,
+ /// Convert word to doubleword
+ cwde,
+ /// Convert doubleword to quadword
+ cdqe,
+ /// Convert word to doubleword
+ cwd,
+ /// Convert doubleword to quadword
+ cdq,
+ /// Convert quadword to double quadword
+ cqo,
+ /// Compare
cmp,
- cmp_mem_imm,
- cmp_scale_src,
- cmp_scale_dst,
- cmp_scale_imm,
- cmp_mem_index_imm,
- mov,
- mov_mem_imm,
- mov_scale_src,
- mov_scale_dst,
- mov_scale_imm,
- mov_mem_index_imm,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2,
- /// 0b01 reg1, byte ptr [reg2 + imm32]
- /// 0b10 reg1, word ptr [reg2 + imm32]
- /// 0b11 reg1, dword ptr [reg2 + imm32]
- mov_sign_extend,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- /// 0b01 reg1, byte ptr [reg2 + imm32]
- /// 0b10 reg1, word ptr [reg2 + imm32]
- mov_zero_extend,
-
- /// ops flags: form:
- /// 0b00 reg1, [reg2 + imm32]
- /// 0b00 reg1, [ds:imm32]
- /// 0b01 reg1, [rip + imm32]
- /// 0b10 reg1, [reg2 + index + imm32]
- /// Notes:
- /// * 0b10 uses `IndexRegisterDisp` payload
- lea,
-
- /// ops flags: form:
- /// 0b00 reg1, [rip + reloc] // via GOT PIC
- /// 0b01 reg1, [rip + reloc] // direct load PIC
- /// 0b10 reg1, [rip + reloc] // via imports table PIC
- /// Notes:
- /// * `Data` contains `relocation`
- lea_pic,
-
- /// ops flags: form:
- /// 0b00 reg1, 1
- /// 0b01 reg1, .cl
- /// 0b10 reg1, imm8
- /// Notes:
- /// * If flags == 0b10, uses `imm`.
- shl,
- shl_mem_imm,
- shl_scale_src,
- shl_scale_dst,
- shl_scale_imm,
- shl_mem_index_imm,
- sal,
- sal_mem_imm,
- sal_scale_src,
- sal_scale_dst,
- sal_scale_imm,
- sal_mem_index_imm,
- shr,
- shr_mem_imm,
- shr_scale_src,
- shr_scale_dst,
- shr_scale_imm,
- shr_mem_index_imm,
- sar,
- sar_mem_imm,
- sar_scale_src,
- sar_scale_dst,
- sar_scale_imm,
- sar_mem_index_imm,
-
- /// ops flags: form:
- /// 0b00 reg1
- /// 0b00 byte ptr [reg2 + imm32]
- /// 0b01 word ptr [reg2 + imm32]
- /// 0b10 dword ptr [reg2 + imm32]
- /// 0b11 qword ptr [reg2 + imm32]
- imul,
- idiv,
- mul,
+ /// Conditional move
+ cmovcc,
+ /// Unsigned division
div,
-
- /// ops flags: form:
- /// 0b00 AX <- AL
- /// 0b01 DX:AX <- AX
- /// 0b10 EDX:EAX <- EAX
- /// 0b11 RDX:RAX <- RAX
- cwd,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- /// 0b01 reg1, [reg2 + imm32]
- /// 0b01 reg1, [imm32] if reg2 is none
- /// 0b10 reg1, reg2, imm32
- /// 0b11 reg1, [reg2 + imm32], imm32
- imul_complex,
-
- /// ops flags: form:
- /// 0b00 reg1, imm64
- /// 0b01 rax, moffs64
- /// Notes:
- /// * If reg1 is 64-bit, the immediate is 64-bit and stored
- /// within extra data `Imm64`.
- /// * For 0b01, reg1 (or reg2) need to be
- /// a version of rax. If reg1 == .none, then reg2 == .rax,
- /// or vice versa.
- movabs,
-
- /// ops flags: form:
- /// 0b00 word ptr [reg1 + imm32]
- /// 0b01 dword ptr [reg1 + imm32]
- /// 0b10 qword ptr [reg1 + imm32]
- /// Notes:
- /// * source is always ST(0)
- /// * only supports memory operands as destination
+ /// Store integer with truncation
fisttp,
-
- /// ops flags: form:
- /// 0b01 dword ptr [reg1 + imm32]
- /// 0b10 qword ptr [reg1 + imm32]
+ /// Load floating-point value
fld,
-
- /// ops flags: form:
- /// 0b00 inst
- /// 0b01 reg1
- /// 0b01 [imm32] if reg1 is none
- /// 0b10 [reg1 + imm32]
+ /// Signed division
+ idiv,
+ /// Signed multiplication
+ imul,
+ /// Breakpoint
+ int3,
+ /// Conditional jump
+ jcc,
+ /// Jump
jmp,
- call,
-
- /// ops flags:
- /// unused
- /// Notes:
- /// * uses `inst_cc` in Data.
- cond_jmp,
-
- /// ops flags:
- /// 0b00 reg1
- /// Notes:
- /// * uses condition code (CC) stored as part of data
- cond_set_byte,
-
- /// ops flags:
- /// 0b00 reg1, reg2,
- /// 0b01 reg1, word ptr [reg2 + imm]
- /// 0b10 reg1, dword ptr [reg2 + imm]
- /// 0b11 reg1, qword ptr [reg2 + imm]
- /// Notes:
- /// * uses condition code (CC) stored as part of data
- cond_mov,
-
- /// ops flags: form:
- /// 0b00 reg1
- /// 0b01 [reg1 + imm32]
- /// 0b10 imm32
- /// Notes:
- /// * If 0b10 is specified and the tag is push, pushes immediate onto the stack
- /// using the mnemonic PUSH imm32.
- push,
+ /// Load effective address
+ lea,
+ /// Move
+ mov,
+ /// Move with sign extension
+ movsx,
+ /// Move with zero extension
+ movzx,
+ /// Multiply
+ mul,
+ /// No-op
+ nop,
+ /// Logical or
+ @"or",
+ /// Pop
pop,
-
- /// ops flags: form:
- /// 0b00 retf imm16
- /// 0b01 retf
- /// 0b10 retn imm16
- /// 0b11 retn
+ /// Push
+ push,
+ /// Return
ret,
-
- /// Fast system call
+ /// Arithmetic shift left
+ sal,
+ /// Arithmetic shift right
+ sar,
+ /// Integer subtraction with borrow
+ sbb,
+ /// Set byte on condition
+ setcc,
+ /// Logical shift left
+ shl,
+ /// Logical shift right
+ shr,
+ /// Subtract
+ sub,
+ /// Syscall
syscall,
-
- /// ops flags: form:
- /// 0b00 reg1, imm32 if reg2 == .none
- /// 0b00 reg1, reg2
- /// TODO handle more cases
+ /// Logical compare
@"test",
+ /// Undefined instruction
+ ud2,
+ /// Logical exclusive-or
+ xor,
- /// Undefined Instruction
- ud,
-
- /// Breakpoint form:
- /// 0b00 int3
- interrupt,
-
- /// Nop
- nop,
-
- /// SSE/AVX instructions
- /// ops flags: form:
- /// 0b00 reg1, qword ptr [reg2 + imm32]
- /// 0b01 qword ptr [reg1 + imm32], reg2
- /// 0b10 reg1, reg2
- mov_f64,
- mov_f32,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- add_f64,
- add_f32,
-
- /// ops flags: form:
- /// 0b00 reg1, reg2
- cmp_f64,
- cmp_f32,
-
- /// Pseudo-instructions
- /// call extern function
- /// Notes:
- /// * target of the call is stored as `relocation` in `Data` union.
- call_extern,
-
- /// end of prologue
+ /// Add scalar single-precision floating-point values
+ addss,
+ /// Compare scalar single-precision floating-point values
+ cmpss,
+ /// Move scalar single-precision floating-point value
+ movss,
+ /// Unordered compare scalar single-precision floating-point values
+ ucomiss,
+ /// Add scalar double-precision floating-point values
+ addsd,
+ /// Compare scalar double-precision floating-point values
+ cmpsd,
+ /// Move scalar double-precision floating-point value
+ movsd,
+ /// Unordered compare scalar double-precision floating-point values
+ ucomisd,
+
+ /// End of prologue
dbg_prologue_end,
-
- /// start of epilogue
+ /// Start of epilogue
dbg_epilogue_begin,
-
- /// update debug line
+ /// Update debug line
+ /// Uses `payload` payload with data of type `DbgLineColumn`.
dbg_line,
-
- /// push registers
- /// Uses `payload` field with `SaveRegisterList` as payload.
+ /// Push registers
+ /// Uses `payload` payload with data of type `SaveRegisterList`.
push_regs,
-
- /// pop registers
- /// Uses `payload` field with `SaveRegisterList` as payload.
+ /// Pop registers
+ /// Uses `payload` payload with data of type `SaveRegisterList`.
pop_regs,
};
- /// The position of an MIR instruction within the `Mir` instructions array.
- pub const Index = u32;
-
- pub const Ops = packed struct {
- reg1: u7,
- reg2: u7,
- flags: u2,
- pub fn encode(vals: struct {
- reg1: Register = .none,
- reg2: Register = .none,
- flags: u2 = 0b00,
- }) Ops {
- return .{
- .reg1 = @enumToInt(vals.reg1),
- .reg2 = @enumToInt(vals.reg2),
- .flags = vals.flags,
- };
- }
-
- pub fn decode(ops: Ops) struct {
- reg1: Register,
- reg2: Register,
- flags: u2,
- } {
- return .{
- .reg1 = @intToEnum(Register, ops.reg1),
- .reg2 = @intToEnum(Register, ops.reg2),
- .flags = ops.flags,
- };
- }
+ pub const Ops = enum(u8) {
+ /// No data associated with this instruction (only mnemonic is used).
+ none,
+ /// Single register operand.
+ /// Uses `r` payload.
+ r,
+ /// Register, register operands.
+ /// Uses `rr` payload.
+ rr,
+ /// Register, register, register operands.
+ /// Uses `rrr` payload.
+ rrr,
+ /// Register, immediate (sign-extended) operands.
+ /// Uses `ri_s` payload.
+ ri_s,
+ /// Register, immediate (unsigned) operands.
+ /// Uses `ri_u` payload.
+ ri_u,
+ /// Register, 64-bit unsigned immediate operands.
+ /// Uses `rx` payload with payload type `Imm64`.
+ ri64,
+ /// Immediate (sign-extended) operand.
+ /// Uses `imm_s` payload.
+ imm_s,
+ /// Immediate (unsigned) operand.
+ /// Uses `imm_u` payload.
+ imm_u,
+ /// Relative displacement operand.
+ /// Uses `rel` payload.
+ rel,
+ /// Register, memory operands.
+ /// Uses `rx` payload.
+ rm,
+ /// Register, memory, immediate (unsigned) operands.
+ /// Uses `rx` payload.
+ rmi_u,
+ /// Register, memory, immediate (sign-extended) operands.
+ /// Uses `rx` payload.
+ rmi_s,
+ /// Memory, immediate (unsigned) operands.
+ /// Uses `payload` payload.
+ mi_u,
+ /// Memory, immediate (sign-extended) operands.
+ /// Uses `payload` payload.
+ mi_s,
+ /// Memory, register operands.
+ /// Uses `payload` payload.
+ mr,
+ /// Lea into register with linker relocation.
+ /// Uses `payload` payload with data of type `LeaRegisterReloc`.
+ lea_r_reloc,
+ /// References another Mir instruction directly.
+ /// Uses `inst` payload.
+ inst,
+ /// References another Mir instruction directly with condition code (CC).
+ /// Uses `inst_cc` payload.
+ inst_cc,
+ /// Uses `payload` payload with data of type `MemoryConditionCode`.
+ m_cc,
+ /// Uses `rx` payload with extra data of type `MemoryConditionCode`.
+ rm_cc,
+ /// Uses `reloc` payload.
+ reloc,
};
- /// All instructions have a 4-byte payload, which is contained within
- /// this union. `Tag` determines which union field is active, as well as
- /// how to interpret the data within.
pub const Data = union {
- /// Another instruction.
+ /// References another Mir instruction.
inst: Index,
- /// A 32-bit immediate value.
- imm: u32,
- /// A 32-bit signed immediate value.
- imm_s: i32,
- /// A 32-bit signed displacement value.
- disp: i32,
- /// A condition code for use with EFLAGS register.
- cc: bits.Condition,
- /// Another instruction with condition code.
- /// Used by `cond_jmp`.
+ /// Another instruction with condition code (CC).
+ /// Used by `jcc`.
inst_cc: struct {
/// Another instruction.
inst: Index,
/// A condition code for use with EFLAGS register.
cc: bits.Condition,
},
+ /// A 32-bit signed immediate value.
+ imm_s: i32,
+ /// A 32-bit unsigned immediate value.
+ imm_u: u32,
+ /// A 32-bit signed relative offset value.
+ rel: i32,
+ r: Register,
+ rr: struct {
+ r1: Register,
+ r2: Register,
+ },
+ rrr: struct {
+ r1: Register,
+ r2: Register,
+ r3: Register,
+ },
+ /// Register, signed immediate.
+ ri_s: struct {
+ r1: Register,
+ imm: i32,
+ },
+ /// Register, unsigned immediate.
+ ri_u: struct {
+ r1: Register,
+ imm: u32,
+ },
+ /// Register, followed by custom payload found in extra.
+ rx: struct {
+ r1: Register,
+ payload: u32,
+ },
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
@@ -458,62 +278,19 @@ pub const Inst = struct {
}
};
-pub const IndexRegisterDisp = struct {
- /// Index register to use with SIB-based encoding
- index: u32,
-
- /// Displacement value
- disp: i32,
-
- pub fn encode(index: Register, disp: i32) IndexRegisterDisp {
- return .{
- .index = @enumToInt(index),
- .disp = disp,
- };
- }
-
- pub fn decode(this: IndexRegisterDisp) struct {
- index: Register,
- disp: i32,
- } {
- return .{
- .index = @intToEnum(Register, this.index),
- .disp = this.disp,
- };
- }
-};
-
-/// TODO: would it be worth making `IndexRegisterDisp` and `IndexRegisterDispImm` a variable length list
-/// instead of having two structs, one a superset of the other one?
-pub const IndexRegisterDispImm = struct {
- /// Index register to use with SIB-based encoding
- index: u32,
-
- /// Displacement value
- disp: i32,
-
- /// Immediate
- imm: u32,
-
- pub fn encode(index: Register, disp: i32, imm: u32) IndexRegisterDispImm {
- return .{
- .index = @enumToInt(index),
- .disp = disp,
- .imm = imm,
- };
- }
-
- pub fn decode(this: IndexRegisterDispImm) struct {
- index: Register,
- disp: i32,
- imm: u32,
- } {
- return .{
- .index = @intToEnum(Register, this.index),
- .disp = this.disp,
- .imm = this.imm,
- };
- }
+pub const LeaRegisterReloc = struct {
+ /// Destination register.
+ reg: Register,
+ /// Type of the load.
+ load_type: enum(u2) {
+ got,
+ direct,
+ import,
+ },
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's symbol table.
+ sym_index: u32,
};
/// Used in conjunction with `SaveRegisterList` payload to transfer a list of used registers
@@ -557,16 +334,13 @@ pub const RegisterList = struct {
};
pub const SaveRegisterList = struct {
+ /// Base register
+ base_reg: u32,
/// Use `RegisterList` to populate.
register_list: u32,
stack_end: u32,
};
-pub const ImmPair = struct {
- dest_off: i32,
- operand: u32,
-};
-
pub const Imm64 = struct {
msb: u32,
lsb: u32,