Commit 1fecf86ebf
Changed files (8)
src/arch/x86_64/bits.zig
@@ -181,6 +181,8 @@ pub const Register = enum(u7) {
es, cs, ss, ds, fs, gs,
+ rip, eip, ip,
+
none,
// zig fmt: on
@@ -442,34 +444,58 @@ pub const FrameIndex = enum(u32) {
}
};
-pub const Memory = union(enum) {
- sib: Sib,
- rip: Rip,
- moffs: Moffs,
+/// A linker symbol not yet allocated in VM.
+pub const Symbol = struct {
+ /// Index of the containing atom.
+ atom_index: u32,
+ /// Index into the linker's symbol table.
+ sym_index: u32,
+
+ pub fn format(
+ sym: Symbol,
+ comptime fmt: []const u8,
+ options: std.fmt.FormatOptions,
+ writer: anytype,
+ ) @TypeOf(writer).Error!void {
+ try writer.writeAll("Symbol(");
+ try std.fmt.formatType(sym.atom_index, fmt, options, writer, 0);
+ try writer.writeAll(", ");
+ try std.fmt.formatType(sym.sym_index, fmt, options, writer, 0);
+ try writer.writeByte(')');
+ }
+};
+
+pub const Memory = struct {
+ base: Base,
+ mod: Mod,
- pub const Base = union(enum) {
+ pub const Base = union(enum(u2)) {
none,
reg: Register,
frame: FrameIndex,
+ reloc: Symbol,
pub const Tag = @typeInfo(Base).Union.tag_type.?;
pub fn isExtended(self: Base) bool {
return switch (self) {
- .none, .frame => false, // neither rsp nor rbp are extended
+ .none, .frame, .reloc => false, // rsp, rbp, and rip are not extended
.reg => |reg| reg.isExtended(),
};
}
};
- pub const ScaleIndex = struct {
- scale: u4,
- index: Register,
-
- const none = ScaleIndex{ .scale = 0, .index = undefined };
+ pub const Mod = union(enum(u1)) {
+ rm: struct {
+ size: Size,
+ index: Register = .none,
+ scale: Scale = .@"1",
+ disp: i32 = 0,
+ },
+ off: u64,
};
- pub const PtrSize = enum {
+ pub const Size = enum(u4) {
none,
byte,
word,
@@ -480,7 +506,7 @@ pub const Memory = union(enum) {
yword,
zword,
- pub fn fromSize(size: u32) PtrSize {
+ pub fn fromSize(size: u32) Size {
return switch (size) {
1...1 => .byte,
2...2 => .word,
@@ -493,7 +519,7 @@ pub const Memory = union(enum) {
};
}
- pub fn fromBitSize(bit_size: u64) PtrSize {
+ pub fn fromBitSize(bit_size: u64) Size {
return switch (bit_size) {
8 => .byte,
16 => .word,
@@ -507,7 +533,7 @@ pub const Memory = union(enum) {
};
}
- pub fn bitSize(s: PtrSize) u64 {
+ pub fn bitSize(s: Size) u64 {
return switch (s) {
.none => 0,
.byte => 8,
@@ -522,7 +548,7 @@ pub const Memory = union(enum) {
}
pub fn format(
- s: PtrSize,
+ s: Size,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
@@ -533,79 +559,7 @@ pub const Memory = union(enum) {
}
};
- pub const Sib = struct {
- ptr_size: PtrSize,
- base: Base,
- scale_index: ScaleIndex,
- disp: i32,
- };
-
- pub const Rip = struct {
- ptr_size: PtrSize,
- disp: i32,
- };
-
- pub const Moffs = struct {
- seg: Register,
- offset: u64,
- };
-
- pub fn moffs(reg: Register, offset: u64) Memory {
- assert(reg.class() == .segment);
- return .{ .moffs = .{ .seg = reg, .offset = offset } };
- }
-
- pub fn sib(ptr_size: PtrSize, args: struct {
- disp: i32 = 0,
- base: Base = .none,
- scale_index: ?ScaleIndex = null,
- }) Memory {
- if (args.scale_index) |si| assert(std.math.isPowerOfTwo(si.scale));
- return .{ .sib = .{
- .base = args.base,
- .disp = args.disp,
- .ptr_size = ptr_size,
- .scale_index = if (args.scale_index) |si| si else ScaleIndex.none,
- } };
- }
-
- pub fn rip(ptr_size: PtrSize, disp: i32) Memory {
- return .{ .rip = .{ .ptr_size = ptr_size, .disp = disp } };
- }
-
- pub fn isSegmentRegister(mem: Memory) bool {
- return switch (mem) {
- .moffs => true,
- .rip => false,
- .sib => |s| switch (s.base) {
- .none, .frame => false,
- .reg => |reg| reg.class() == .segment,
- },
- };
- }
-
- pub fn base(mem: Memory) Base {
- return switch (mem) {
- .moffs => |m| .{ .reg = m.seg },
- .sib => |s| s.base,
- .rip => .none,
- };
- }
-
- pub fn scaleIndex(mem: Memory) ?ScaleIndex {
- return switch (mem) {
- .moffs, .rip => null,
- .sib => |s| if (s.scale_index.scale > 0) s.scale_index else null,
- };
- }
-
- pub fn bitSize(mem: Memory) u64 {
- return switch (mem) {
- .rip => |r| r.ptr_size.bitSize(),
- .sib => |s| s.ptr_size.bitSize(),
- .moffs => 64,
- };
- }
+ pub const Scale = enum(u2) { @"1", @"2", @"4", @"8" };
};
pub const Immediate = union(enum) {
src/arch/x86_64/CodeGen.zig
@@ -115,6 +115,7 @@ const mir_to_air_map_init = if (builtin.mode == .Debug) std.AutoHashMapUnmanaged
const FrameAddr = struct { index: FrameIndex, off: i32 = 0 };
const RegisterOffset = struct { reg: Register, off: i32 = 0 };
+const SymbolOffset = struct { sym: u32, off: i32 = 0 };
const Owner = union(enum) {
func_index: InternPool.Index,
@@ -195,9 +196,9 @@ pub const MCValue = union(enum) {
memory: u64,
/// The value is in memory at an address not-yet-allocated by the linker.
/// This traditionally corresponds to a relocation emitted in a relocatable object file.
- load_symbol: u32,
+ load_symbol: SymbolOffset,
/// The address of the memory location not-yet-allocated by the linker.
- lea_symbol: u32,
+ lea_symbol: SymbolOffset,
/// The value is in memory at a constant offset from the address in a register.
indirect: RegisterOffset,
/// The value is in memory.
@@ -315,7 +316,7 @@ pub const MCValue = union(enum) {
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
.load_frame => |frame_addr| .{ .lea_frame = frame_addr },
- .load_symbol => |sym_index| .{ .lea_symbol = sym_index },
+ .load_symbol => |sym_off| .{ .lea_symbol = sym_off },
};
}
@@ -387,7 +388,7 @@ pub const MCValue = union(enum) {
};
}
- fn mem(mcv: MCValue, ptr_size: Memory.PtrSize) Memory {
+ fn mem(mcv: MCValue, size: Memory.Size) Memory {
return switch (mcv) {
.none,
.unreach,
@@ -411,18 +412,27 @@ pub const MCValue = union(enum) {
.load_symbol,
.lea_symbol,
=> unreachable,
- .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
- Memory.sib(ptr_size, .{ .base = .{ .reg = .ds }, .disp = small_addr })
- else
- Memory.moffs(.ds, addr),
- .indirect => |reg_off| Memory.sib(ptr_size, .{
+ .memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr| .{
+ .base = .{ .reg = .ds },
+ .mod = .{ .rm = .{
+ .size = size,
+ .disp = small_addr,
+ } },
+ } else .{ .base = .{ .reg = .ds }, .mod = .{ .off = addr } },
+ .indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
- .disp = reg_off.off,
- }),
- .load_frame => |frame_addr| Memory.sib(ptr_size, .{
+ .mod = .{ .rm = .{
+ .size = size,
+ .disp = reg_off.off,
+ } },
+ },
+ .load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off,
- }),
+ .mod = .{ .rm = .{
+ .size = size,
+ .disp = frame_addr.off,
+ } },
+ },
};
}
@@ -442,6 +452,8 @@ pub const MCValue = union(enum) {
.register_overflow => |pl| try writer.print("{s}:{s}", .{
@tagName(pl.eflags), @tagName(pl.reg),
}),
+ .load_symbol => |pl| try writer.print("[{} + 0x{x}]", .{ pl.sym, pl.off }),
+ .lea_symbol => |pl| try writer.print("{} + 0x{x}", .{ pl.sym, pl.off }),
.indirect => |pl| try writer.print("[{s} + 0x{x}]", .{ @tagName(pl.reg), pl.off }),
.load_direct => |pl| try writer.print("[direct:{d}]", .{pl}),
.lea_direct => |pl| try writer.print("direct:{d}", .{pl}),
@@ -453,8 +465,6 @@ pub const MCValue = union(enum) {
.lea_frame => |pl| try writer.print("{} + 0x{x}", .{ pl.index, pl.off }),
.reserved_frame => |pl| try writer.print("(dead:{})", .{pl}),
.air_ref => |pl| try writer.print("(air:0x{x})", .{@intFromEnum(pl)}),
- .load_symbol => |pl| try writer.print("[symbol:{d}]", .{pl}),
- .lea_symbol => |pl| try writer.print("symbol:{d}", .{pl}),
}
}
};
@@ -888,7 +898,6 @@ pub fn generate(
.cc = cc,
.src_loc = src_loc,
},
- .bin_file = bin_file,
.debug_output = debug_output,
.code = code,
.prev_di_pc = 0,
@@ -976,7 +985,6 @@ pub fn generateLazy(
.cc = abi.resolveCallingConvention(.Unspecified, function.target.*),
.src_loc = src_loc,
},
- .bin_file = bin_file,
.debug_output = debug_output,
.code = code,
.prev_di_pc = undefined, // no debug info yet
@@ -1139,7 +1147,7 @@ fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
inline for (fields) |field| {
self.mir_extra.appendAssumeCapacity(switch (field.type) {
u32 => @field(extra, field.name),
- i32 => @bitCast(@field(extra, field.name)),
+ i32, Mir.Memory.Info => @bitCast(@field(extra, field.name)),
else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
});
}
@@ -1178,17 +1186,9 @@ fn asmCmovccRegisterMemory(self: *Self, cc: Condition, reg: Register, m: Memory)
.nz_or_p => .pseudo,
},
.ops = switch (cc) {
- else => switch (m) {
- .sib => .rm_sib,
- .rip => .rm_rip,
- else => unreachable,
- },
+ else => .rm,
.z_and_np => unreachable,
- .nz_or_p => switch (m) {
- .sib => .pseudo_cmov_nz_or_p_rm_sib,
- .rip => .pseudo_cmov_nz_or_p_rm_rip,
- else => unreachable,
- },
+ .nz_or_p => .pseudo_cmov_nz_or_p_rm,
},
.data = .{ .rx = .{
.fixes = switch (cc) {
@@ -1197,11 +1197,7 @@ fn asmCmovccRegisterMemory(self: *Self, cc: Condition, reg: Register, m: Memory)
.nz_or_p => ._,
},
.r1 = reg,
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1231,32 +1227,16 @@ fn asmSetccRegister(self: *Self, cc: Condition, reg: Register) !void {
}
fn asmSetccMemory(self: *Self, cc: Condition, m: Memory) !void {
- const payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- };
+ const payload = try self.addExtra(Mir.Memory.encode(m));
_ = try self.addInst(.{
.tag = switch (cc) {
else => .set,
.z_and_np, .nz_or_p => .pseudo,
},
.ops = switch (cc) {
- else => switch (m) {
- .sib => .m_sib,
- .rip => .m_rip,
- else => unreachable,
- },
- .z_and_np => switch (m) {
- .sib => .pseudo_set_z_and_np_m_sib,
- .rip => .pseudo_set_z_and_np_m_rip,
- else => unreachable,
- },
- .nz_or_p => switch (m) {
- .sib => .pseudo_set_nz_or_p_m_sib,
- .rip => .pseudo_set_nz_or_p_m_rip,
- else => unreachable,
- },
+ else => .m,
+ .z_and_np => .pseudo_set_z_and_np_m,
+ .nz_or_p => .pseudo_set_nz_or_p_m,
},
.data = switch (cc) {
else => .{ .x = .{
@@ -1504,20 +1484,12 @@ fn asmRegisterRegisterMemory(
) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .rrm_sib,
- .rip => .rrm_rip,
- else => unreachable,
- },
+ .ops = .rrm,
.data = .{ .rrx = .{
.fixes = tag[0],
.r1 = reg1,
.r2 = reg2,
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1525,18 +1497,10 @@ fn asmRegisterRegisterMemory(
fn asmMemory(self: *Self, tag: Mir.Inst.FixedTag, m: Memory) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .m_sib,
- .rip => .m_rip,
- else => unreachable,
- },
+ .ops = .m,
.data = .{ .x = .{
.fixes = tag[0],
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1544,19 +1508,11 @@ fn asmMemory(self: *Self, tag: Mir.Inst.FixedTag, m: Memory) !void {
fn asmRegisterMemory(self: *Self, tag: Mir.Inst.FixedTag, reg: Register, m: Memory) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .rm_sib,
- .rip => .rm_rip,
- else => unreachable,
- },
+ .ops = .rm,
.data = .{ .rx = .{
.fixes = tag[0],
.r1 = reg,
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1574,20 +1530,12 @@ fn asmRegisterMemoryImmediate(
}) |small_imm| {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .rmi_sib,
- .rip => .rmi_rip,
- else => unreachable,
- },
+ .ops = .rmi,
.data = .{ .rix = .{
.fixes = tag[0],
.r1 = reg,
.i = small_imm,
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
} else {
@@ -1595,23 +1543,12 @@ fn asmRegisterMemoryImmediate(
.signed => |s| @bitCast(s),
.unsigned => unreachable,
} });
- assert(payload + 1 == switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- });
+ assert(payload + 1 == try self.addExtra(Mir.Memory.encode(m)));
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => switch (imm) {
- .signed => .rmi_sib_s,
- .unsigned => .rmi_sib_u,
- },
- .rip => switch (imm) {
- .signed => .rmi_rip_s,
- .unsigned => .rmi_rip_u,
- },
- else => unreachable,
+ .ops = switch (imm) {
+ .signed => .rmi_s,
+ .unsigned => .rmi_u,
},
.data = .{ .rx = .{
.fixes = tag[0],
@@ -1632,21 +1569,13 @@ fn asmRegisterRegisterMemoryImmediate(
) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .rrmi_sib,
- .rip => .rrmi_rip,
- else => unreachable,
- },
+ .ops = .rrmi,
.data = .{ .rrix = .{
.fixes = tag[0],
.r1 = reg1,
.r2 = reg2,
.i = @intCast(imm.unsigned),
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1654,19 +1583,11 @@ fn asmRegisterRegisterMemoryImmediate(
fn asmMemoryRegister(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, reg: Register) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .mr_sib,
- .rip => .mr_rip,
- else => unreachable,
- },
+ .ops = .mr,
.data = .{ .rx = .{
.fixes = tag[0],
.r1 = reg,
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1676,23 +1597,12 @@ fn asmMemoryImmediate(self: *Self, tag: Mir.Inst.FixedTag, m: Memory, imm: Immed
.signed => |s| @bitCast(s),
.unsigned => |u| @intCast(u),
} });
- assert(payload + 1 == switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- });
+ assert(payload + 1 == try self.addExtra(Mir.Memory.encode(m)));
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => switch (imm) {
- .signed => .mi_sib_s,
- .unsigned => .mi_sib_u,
- },
- .rip => switch (imm) {
- .signed => .mi_rip_s,
- .unsigned => .mi_rip_u,
- },
- else => unreachable,
+ .ops = switch (imm) {
+ .signed => .mi_s,
+ .unsigned => .mi_u,
},
.data = .{ .x = .{
.fixes = tag[0],
@@ -1710,20 +1620,12 @@ fn asmMemoryRegisterRegister(
) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .mrr_sib,
- .rip => .mrr_rip,
- else => unreachable,
- },
+ .ops = .mrr,
.data = .{ .rrx = .{
.fixes = tag[0],
.r1 = reg1,
.r2 = reg2,
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1737,20 +1639,12 @@ fn asmMemoryRegisterImmediate(
) !void {
_ = try self.addInst(.{
.tag = tag[1],
- .ops = switch (m) {
- .sib => .mri_sib,
- .rip => .mri_rip,
- else => unreachable,
- },
+ .ops = .mri,
.data = .{ .rix = .{
.fixes = tag[0],
.r1 = reg,
.i = @intCast(imm.unsigned),
- .payload = switch (m) {
- .sib => try self.addExtra(Mir.MemorySib.encode(m)),
- .rip => try self.addExtra(Mir.MemoryRip.encode(m)),
- else => unreachable,
- },
+ .payload = try self.addExtra(Mir.Memory.encode(m)),
} },
});
}
@@ -1916,13 +1810,16 @@ fn gen(self: *Self) InnerError!void {
if (need_frame_align or need_stack_adjust) {
self.mir_instructions.set(backpatch_stack_dealloc, .{
.tag = .lea,
- .ops = .rm_sib,
+ .ops = .rm,
.data = .{ .rx = .{
.r1 = .rsp,
- .payload = try self.addExtra(Mir.MemorySib.encode(Memory.sib(.qword, .{
+ .payload = try self.addExtra(Mir.Memory.encode(.{
.base = .{ .reg = .rbp },
- .disp = -frame_layout.save_reg_list.size(),
- }))),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = -frame_layout.save_reg_list.size(),
+ } },
+ })),
} },
});
}
@@ -3098,14 +2995,14 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void {
.{ .vp_, .@"and" },
dst_reg,
dst_reg,
- splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)),
+ splat_addr_mcv.deref().mem(Memory.Size.fromSize(splat_abi_size)),
);
try self.asmRegisterRegisterRegister(mir_tag, dst_reg, dst_reg, dst_reg);
} else {
try self.asmRegisterMemory(
.{ .p_, .@"and" },
dst_reg,
- splat_addr_mcv.deref().mem(Memory.PtrSize.fromSize(splat_abi_size)),
+ splat_addr_mcv.deref().mem(Memory.Size.fromSize(splat_abi_size)),
);
try self.asmRegisterRegister(mir_tag, dst_reg, dst_reg);
}
@@ -3254,7 +3151,7 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
const frame_index = try self.allocFrameIndex(FrameAlloc.initType(Type.usize, mod));
try self.asmMemoryImmediate(
.{ ._, .mov },
- Memory.sib(.qword, .{ .base = .{ .frame = frame_index } }),
+ .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
Immediate.u(0),
);
@@ -3338,9 +3235,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
call_mcv.register_pair[0],
call_mcv.register_pair[1],
);
- try self.asmSetccMemory(.nz, Memory.sib(.byte, .{
+ try self.asmSetccMemory(.nz, .{
.base = .{ .frame = signed_div_floor_state.frame_index },
- }));
+ .mod = .{ .rm = .{ .size = .byte } },
+ });
try self.performReloc(signed_div_floor_state.reloc);
const dst_mcv = try self.genCall(
.{ .lib = .{
@@ -3356,9 +3254,10 @@ fn airMulDivBinOp(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .sub },
dst_mcv.register_pair[0],
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = signed_div_floor_state.frame_index },
- }),
+ .mod = .{ .rm = .{ .size = .qword } },
+ },
);
try self.asmRegisterImmediate(
.{ ._, .sbb },
@@ -3891,7 +3790,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void {
);
try self.asmMemoryImmediate(
.{ ._, .cmp },
- overflow.mem(self.memPtrSize(Type.c_int)),
+ overflow.mem(self.memSize(Type.c_int)),
Immediate.s(0),
);
try self.genSetMem(
@@ -4123,7 +4022,7 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
.register => |reg| try self.asmRegister(tag, registerAlias(reg, abi_size)),
.memory, .indirect, .load_frame => try self.asmMemory(
tag,
- mat_rhs.mem(Memory.PtrSize.fromSize(abi_size)),
+ mat_rhs.mem(Memory.Size.fromSize(abi_size)),
),
else => unreachable,
}
@@ -4360,10 +4259,13 @@ fn airUnwrapErrUnionErrPtr(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .mov },
registerAlias(dst_reg, err_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
+ .{
.base = .{ .reg = src_reg },
- .disp = err_off,
- }),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(err_abi_size),
+ .disp = err_off,
+ } },
+ },
);
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
@@ -4398,10 +4300,13 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const err_abi_size: u32 = @intCast(err_ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .mov },
- Memory.sib(Memory.PtrSize.fromSize(err_abi_size), .{
+ .{
.base = .{ .reg = src_reg },
- .disp = err_off,
- }),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(err_abi_size),
+ .disp = err_off,
+ } },
+ },
Immediate.u(0),
);
@@ -4420,7 +4325,10 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
- Memory.sib(.qword, .{ .base = .{ .reg = src_reg }, .disp = pl_off }),
+ .{
+ .base = .{ .reg = src_reg },
+ .mod = .{ .rm = .{ .size = .qword, .disp = pl_off } },
+ },
);
break :result .{ .register = dst_reg };
};
@@ -4542,10 +4450,13 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
.load_frame => |frame_addr| try self.asmMemoryImmediate(
.{ ._, .mov },
- Memory.sib(.byte, .{
+ .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off + pl_abi_size,
- }),
+ .mod = .{ .rm = .{
+ .size = .byte,
+ .disp = frame_addr.off + pl_abi_size,
+ } },
+ },
Immediate.u(1),
),
}
@@ -4658,10 +4569,10 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .lea },
registerAlias(dst_reg, dst_abi_size),
- Memory.sib(.qword, .{
+ .{
.base = .{ .reg = src_reg },
- .disp = @divExact(self.target.ptrBitWidth(), 8),
- }),
+ .mod = .{ .rm = .{ .size = .qword, .disp = 8 } },
+ },
);
return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
@@ -4791,13 +4702,16 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .lea },
addr_reg,
- Memory.sib(.qword, .{ .base = .{ .frame = frame_index } }),
+ .{ .base = .{ .frame = frame_index }, .mod = .{ .rm = .{ .size = .qword } } },
);
},
.load_frame => |frame_addr| try self.asmRegisterMemory(
.{ ._, .lea },
addr_reg,
- Memory.sib(.qword, .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off }),
+ .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{ .size = .qword, .disp = frame_addr.off } },
+ },
),
.memory,
.load_symbol,
@@ -5610,10 +5524,14 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .lea },
if (limb_abi_size > 4) tmp.to64() else tmp.to32(),
- Memory.sib(.qword, .{
+ .{
.base = .{ .reg = dst.to64() },
- .scale_index = .{ .index = tmp.to64(), .scale = 1 << 2 },
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .index = tmp.to64(),
+ .scale = .@"4",
+ } },
+ },
);
// tmp = temp3 = ((temp2 >> 2) & 0x33...33) + ((temp2 & 0x33...33) << 2)
try self.asmRegisterRegister(.{ ._, .mov }, dst, tmp);
@@ -5633,10 +5551,14 @@ fn airBitReverse(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .lea },
if (limb_abi_size > 4) dst.to64() else dst.to32(),
- Memory.sib(.qword, .{
+ .{
.base = .{ .reg = tmp.to64() },
- .scale_index = .{ .index = dst.to64(), .scale = 1 << 1 },
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .index = dst.to64(),
+ .scale = .@"2",
+ } },
+ },
);
// dst = ((temp3 >> 1) & 0x55...55) + ((temp3 & 0x55...55) << 1)
}
@@ -5713,12 +5635,13 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
.abs => try vec_ty.maxInt(mod, vec_ty),
else => unreachable,
} });
- const sign_mem = if (sign_mcv.isMemory())
- sign_mcv.mem(Memory.PtrSize.fromSize(abi_size))
+ const sign_mem: Memory = if (sign_mcv.isMemory())
+ sign_mcv.mem(Memory.Size.fromSize(abi_size))
else
- Memory.sib(Memory.PtrSize.fromSize(abi_size), .{
+ .{
.base = .{ .reg = try self.copyToTmpRegister(Type.usize, sign_mcv.address()) },
- });
+ .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } },
+ };
if (self.hasFeature(.avx)) try self.asmRegisterRegisterMemory(
switch (scalar_bits) {
@@ -5896,7 +5819,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro
mir_tag,
dst_alias,
dst_alias,
- src_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ src_mcv.mem(Memory.Size.fromSize(abi_size)),
Immediate.u(@as(u5, @bitCast(mode))),
) else try self.asmRegisterRegisterRegisterImmediate(
mir_tag,
@@ -5911,7 +5834,7 @@ fn genRound(self: *Self, ty: Type, dst_reg: Register, src_mcv: MCValue, mode: Ro
else => if (src_mcv.isMemory()) try self.asmRegisterMemoryImmediate(
mir_tag,
dst_alias,
- src_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ src_mcv.mem(Memory.Size.fromSize(abi_size)),
Immediate.u(@as(u5, @bitCast(mode))),
) else try self.asmRegisterRegisterImmediate(
mir_tag,
@@ -5951,7 +5874,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
.memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory(
.l,
registerAlias(dst_mcv.register, cmov_abi_size),
- src_mcv.mem(Memory.PtrSize.fromSize(cmov_abi_size)),
+ src_mcv.mem(Memory.Size.fromSize(cmov_abi_size)),
),
else => {
const val_reg = try self.copyToTmpRegister(ty, src_mcv);
@@ -6051,7 +5974,7 @@ fn airAbs(self: *Self, inst: Air.Inst.Index) !void {
if (src_mcv.isMemory()) try self.asmRegisterMemory(
mir_tag,
dst_alias,
- src_mcv.mem(self.memPtrSize(ty)),
+ src_mcv.mem(self.memSize(ty)),
) else try self.asmRegisterRegister(
mir_tag,
dst_alias,
@@ -6157,7 +6080,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
if (src_mcv.isMemory()) try self.asmRegisterMemory(
.{ .v_ps, .cvtph2 },
wide_reg,
- src_mcv.mem(Memory.PtrSize.fromSize(
+ src_mcv.mem(Memory.Size.fromSize(
@intCast(@divExact(wide_reg.bitSize(), 16)),
)),
) else try self.asmRegisterRegister(
@@ -6205,7 +6128,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
mir_tag,
dst_reg,
dst_reg,
- src_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ src_mcv.mem(Memory.Size.fromSize(abi_size)),
) else try self.asmRegisterRegisterRegister(
mir_tag,
dst_reg,
@@ -6218,7 +6141,7 @@ fn airSqrt(self: *Self, inst: Air.Inst.Index) !void {
else => if (src_mcv.isMemory()) try self.asmRegisterMemory(
mir_tag,
dst_reg,
- src_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ src_mcv.mem(Memory.Size.fromSize(abi_size)),
) else try self.asmRegisterRegister(
mir_tag,
dst_reg,
@@ -6337,14 +6260,13 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
if (val_bit_off < val_extra_bits) val_abi_size else val_abi_size * 2;
if (load_abi_size <= 8) {
const load_reg = registerAlias(dst_reg, load_abi_size);
- try self.asmRegisterMemory(
- .{ ._, .mov },
- load_reg,
- Memory.sib(Memory.PtrSize.fromSize(load_abi_size), .{
- .base = .{ .reg = ptr_reg },
+ try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{
+ .base = .{ .reg = ptr_reg },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(load_abi_size),
.disp = val_byte_off,
- }),
- );
+ } },
+ });
try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, Immediate.u(val_bit_off));
} else {
const tmp_reg =
@@ -6353,22 +6275,20 @@ fn packedLoad(self: *Self, dst_mcv: MCValue, ptr_ty: Type, ptr_mcv: MCValue) Inn
defer self.register_manager.unlockReg(tmp_lock);
const dst_alias = registerAlias(dst_reg, val_abi_size);
- try self.asmRegisterMemory(
- .{ ._, .mov },
- dst_alias,
- Memory.sib(Memory.PtrSize.fromSize(val_abi_size), .{
- .base = .{ .reg = ptr_reg },
+ try self.asmRegisterMemory(.{ ._, .mov }, dst_alias, .{
+ .base = .{ .reg = ptr_reg },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(val_abi_size),
.disp = val_byte_off,
- }),
- );
- try self.asmRegisterMemory(
- .{ ._, .mov },
- tmp_reg,
- Memory.sib(Memory.PtrSize.fromSize(val_abi_size), .{
- .base = .{ .reg = ptr_reg },
+ } },
+ });
+ try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{
+ .base = .{ .reg = ptr_reg },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(val_abi_size),
.disp = val_byte_off + 1,
- }),
- );
+ } },
+ });
try self.asmRegisterRegisterImmediate(
.{ ._rd, .sh },
dst_alias,
@@ -6480,10 +6400,13 @@ fn packedStore(self: *Self, ptr_ty: Type, ptr_mcv: MCValue, src_mcv: MCValue) In
const part_bit_off = if (limb_i == 0) src_bit_off else 0;
const part_bit_size =
@min(src_bit_off + src_bit_size - limb_i * limb_abi_bits, limb_abi_bits) - part_bit_off;
- const limb_mem = Memory.sib(Memory.PtrSize.fromSize(limb_abi_size), .{
+ const limb_mem: Memory = .{
.base = .{ .reg = ptr_reg },
- .disp = src_byte_off + limb_i * limb_abi_bits,
- });
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
+ .disp = src_byte_off + limb_i * limb_abi_bits,
+ } },
+ };
const part_mask = (@as(u64, math.maxInt(u64)) >> @intCast(64 - part_bit_size)) <<
@intCast(part_bit_off);
@@ -6814,7 +6737,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
break :result dst_mcv;
}
- const limb_abi_size: u32 = @min(field_abi_size, 8);
+ const limb_abi_size: u31 = @min(field_abi_size, 8);
const limb_abi_bits = limb_abi_size * 8;
const field_byte_off: i32 = @intCast(field_off / limb_abi_bits * limb_abi_size);
const field_bit_off = field_off % limb_abi_bits;
@@ -6832,14 +6755,13 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
if (field_bit_off < field_extra_bits) field_abi_size else field_abi_size * 2;
if (load_abi_size <= 8) {
const load_reg = registerAlias(dst_reg, load_abi_size);
- try self.asmRegisterMemory(
- .{ ._, .mov },
- load_reg,
- Memory.sib(Memory.PtrSize.fromSize(load_abi_size), .{
- .base = .{ .frame = frame_addr.index },
+ try self.asmRegisterMemory(.{ ._, .mov }, load_reg, .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(load_abi_size),
.disp = frame_addr.off + field_byte_off,
- }),
- );
+ } },
+ });
try self.asmRegisterImmediate(.{ ._r, .sh }, load_reg, Immediate.u(field_bit_off));
} else {
const tmp_reg = registerAlias(
@@ -6853,19 +6775,21 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .mov },
dst_alias,
- Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{
+ .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off + field_byte_off,
- }),
- );
- try self.asmRegisterMemory(
- .{ ._, .mov },
- tmp_reg,
- Memory.sib(Memory.PtrSize.fromSize(field_abi_size), .{
- .base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off + field_byte_off + @as(i32, @intCast(limb_abi_size)),
- }),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(field_abi_size),
+ .disp = frame_addr.off + field_byte_off,
+ } },
+ },
);
+ try self.asmRegisterMemory(.{ ._, .mov }, tmp_reg, .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(field_abi_size),
+ .disp = frame_addr.off + field_byte_off + limb_abi_size,
+ } },
+ });
try self.asmRegisterRegisterImmediate(
.{ ._rd, .sh },
dst_alias,
@@ -6998,14 +6922,13 @@ fn genUnOpMir(self: *Self, mir_tag: Mir.Inst.FixedTag, dst_ty: Type, dst_mcv: MC
defer self.register_manager.unlockReg(addr_reg_lock);
try self.genSetReg(addr_reg, Type.usize, dst_mcv.address());
- try self.asmMemory(
- mir_tag,
- Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }),
- );
+ try self.asmMemory(mir_tag, .{ .base = .{ .reg = addr_reg }, .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ } } });
},
.indirect, .load_frame => try self.asmMemory(
mir_tag,
- dst_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ dst_mcv.mem(Memory.Size.fromSize(abi_size)),
),
}
}
@@ -7053,25 +6976,34 @@ fn genShiftBinOpMir(
}),
},
.memory, .indirect, .load_frame => {
- const lhs_mem = Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (lhs_mcv) {
+ const lhs_mem: Memory = switch (lhs_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
- .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
- return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{
- @tagName(lhs_mcv),
- @tagName(rhs_mcv),
- }),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
+ return self.fail("TODO genShiftBinOpMir between {s} and {s}", .{
+ @tagName(lhs_mcv),
+ @tagName(rhs_mcv),
+ }),
+ } },
},
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
- .disp = reg_off.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = reg_off.off,
+ } },
},
.load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = frame_addr.off,
+ } },
},
else => unreachable,
- });
+ };
switch (rhs_mcv) {
.immediate => |rhs_imm| try self.asmMemoryImmediate(
tag,
@@ -7187,26 +7119,35 @@ fn genShiftBinOpMir(
try self.asmRegisterMemory(
.{ ._, .mov },
tmp_reg,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
);
try self.asmMemoryRegisterImmediate(
info.double_tag,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[1] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[1] * 8,
+ } },
+ },
tmp_reg,
Immediate.u(rhs_imm),
);
try self.asmMemoryImmediate(
tag,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
Immediate.u(rhs_imm),
);
} else {
@@ -7214,10 +7155,13 @@ fn genShiftBinOpMir(
try self.asmRegisterMemory(
.{ ._, .mov },
tmp_reg,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
);
if (rhs_imm > 64) try self.asmRegisterImmediate(
tag,
@@ -7226,27 +7170,36 @@ fn genShiftBinOpMir(
);
try self.asmMemoryRegister(
.{ ._, .mov },
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[1] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[1] * 8,
+ } },
+ },
tmp_reg,
);
if (tag[0] == ._r and tag[1] == .sa) try self.asmMemoryImmediate(
tag,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
Immediate.u(63),
) else {
try self.asmRegisterRegister(.{ ._, .xor }, tmp_reg.to32(), tmp_reg.to32());
try self.asmMemoryRegister(
.{ ._, .mov },
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
tmp_reg,
);
}
@@ -7265,18 +7218,24 @@ fn genShiftBinOpMir(
try self.asmRegisterMemory(
.{ ._, .mov },
first_reg,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
);
try self.asmRegisterMemory(
.{ ._, .mov },
second_reg,
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[1] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[1] * 8,
+ } },
+ },
);
if (tag[0] == ._r and tag[1] == .sa) {
try self.asmRegisterRegister(.{ ._, .mov }, tmp_reg, first_reg);
@@ -7302,18 +7261,24 @@ fn genShiftBinOpMir(
try self.asmCmovccRegisterRegister(.ae, first_reg, tmp_reg);
try self.asmMemoryRegister(
.{ ._, .mov },
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[1] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[1] * 8,
+ } },
+ },
second_reg,
);
try self.asmMemoryRegister(
.{ ._, .mov },
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_frame_addr.index },
- .disp = dst_frame_addr.off + info.indices[0] * 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_frame_addr.off + info.indices[0] * 8,
+ } },
+ },
first_reg,
);
},
@@ -7541,22 +7506,20 @@ fn genMulDivBinOp(
}, dst_abi_size) };
const dst_mcv = try self.allocRegOrMemAdvanced(dst_ty, maybe_inst, false);
- try self.asmMemoryRegister(
- .{ ._, .mov },
- Memory.sib(.qword, .{
- .base = .{ .frame = dst_mcv.load_frame.index },
+ try self.asmMemoryRegister(.{ ._, .mov }, .{
+ .base = .{ .frame = dst_mcv.load_frame.index },
+ .mod = .{ .rm = .{
+ .size = .qword,
.disp = dst_mcv.load_frame.off,
- }),
- .rax,
- );
- try self.asmMemoryRegister(
- .{ ._, .mov },
- Memory.sib(.qword, .{
- .base = .{ .frame = dst_mcv.load_frame.index },
+ } },
+ }, .rax);
+ try self.asmMemoryRegister(.{ ._, .mov }, .{
+ .base = .{ .frame = dst_mcv.load_frame.index },
+ .mod = .{ .rm = .{
+ .size = .qword,
.disp = dst_mcv.load_frame.off + 8,
- }),
- .rdx,
- );
+ } },
+ }, .rdx);
return dst_mcv;
},
@@ -7768,7 +7731,7 @@ fn genBinOp(
mir_tag,
dst_reg,
dst_reg,
- src_mcv.mem(Memory.PtrSize.fromBitSize(float_bits)),
+ src_mcv.mem(Memory.Size.fromBitSize(float_bits)),
) else try self.asmRegisterRegisterRegister(
mir_tag,
dst_reg,
@@ -7787,7 +7750,7 @@ fn genBinOp(
if (src_mcv.isMemory()) try self.asmRegisterMemory(
mir_tag,
dst_reg,
- src_mcv.mem(Memory.PtrSize.fromBitSize(float_bits)),
+ src_mcv.mem(Memory.Size.fromBitSize(float_bits)),
) else try self.asmRegisterRegister(
mir_tag,
dst_reg,
@@ -8149,21 +8112,30 @@ fn genBinOp(
.memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory(
cc,
registerAlias(tmp_reg, cmov_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(cmov_abi_size), switch (mat_src_mcv) {
+ switch (mat_src_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
- .disp = @intCast(@as(i64, @bitCast(addr))),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(cmov_abi_size),
+ .disp = @intCast(@as(i64, @bitCast(addr))),
+ } },
},
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
- .disp = reg_off.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(cmov_abi_size),
+ .disp = reg_off.off,
+ } },
},
.load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(cmov_abi_size),
+ .disp = frame_addr.off,
+ } },
},
else => unreachable,
- }),
+ },
),
}
try self.genCopy(lhs_ty, dst_mcv, .{ .register = tmp_reg });
@@ -8912,8 +8884,8 @@ fn genBinOp(
dst_reg,
lhs_reg,
src_mcv.mem(switch (lhs_ty.zigTypeTag(mod)) {
- else => Memory.PtrSize.fromSize(abi_size),
- .Vector => Memory.PtrSize.fromBitSize(dst_reg.bitSize()),
+ else => Memory.Size.fromSize(abi_size),
+ .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
) else try self.asmRegisterRegisterRegister(
mir_tag,
@@ -8930,8 +8902,8 @@ fn genBinOp(
mir_tag,
dst_reg,
src_mcv.mem(switch (lhs_ty.zigTypeTag(mod)) {
- else => Memory.PtrSize.fromSize(abi_size),
- .Vector => Memory.PtrSize.fromBitSize(dst_reg.bitSize()),
+ else => Memory.Size.fromSize(abi_size),
+ .Vector => Memory.Size.fromBitSize(dst_reg.bitSize()),
}),
) else try self.asmRegisterRegister(
mir_tag,
@@ -9202,12 +9174,12 @@ fn genBinOp(
.ty = lhs_ty,
.val = try unsigned_ty.maxInt(mod, unsigned_ty),
});
- const not_mem = if (not_mcv.isMemory())
- not_mcv.mem(Memory.PtrSize.fromSize(abi_size))
+ const not_mem: Memory = if (not_mcv.isMemory())
+ not_mcv.mem(Memory.Size.fromSize(abi_size))
else
- Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{
+ .{ .base = .{
.reg = try self.copyToTmpRegister(Type.usize, not_mcv.address()),
- } });
+ }, .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } } };
switch (mir_tag[0]) {
.vp_b, .vp_d, .vp_q, .vp_w => try self.asmRegisterRegisterMemory(
.{ .vp_, .xor },
@@ -9359,25 +9331,30 @@ fn genBinOpMir(
.lea_frame,
=> {
blk: {
- return self.asmRegisterMemory(
- mir_limb_tag,
- dst_alias,
- Memory.sib(Memory.PtrSize.fromSize(limb_abi_size), switch (src_mcv) {
- .memory => |addr| .{
- .base = .{ .reg = .ds },
+ return self.asmRegisterMemory(mir_limb_tag, dst_alias, switch (src_mcv) {
+ .memory => |addr| .{
+ .base = .{ .reg = .ds },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
.disp = math.cast(i32, addr + off) orelse break :blk,
- },
- .indirect => |reg_off| .{
- .base = .{ .reg = reg_off.reg },
+ } },
+ },
+ .indirect => |reg_off| .{
+ .base = .{ .reg = reg_off.reg },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
.disp = reg_off.off + off,
- },
- .load_frame => |frame_addr| .{
- .base = .{ .frame = frame_addr.index },
+ } },
+ },
+ .load_frame => |frame_addr| .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
.disp = frame_addr.off + off,
- },
- else => break :blk,
- }),
- );
+ } },
+ },
+ else => break :blk,
+ });
}
switch (src_mcv) {
@@ -9510,26 +9487,35 @@ fn genBinOpMir(
}),
},
};
- const dst_limb_mem = Memory.sib(
- Memory.PtrSize.fromSize(limb_abi_size),
- switch (dst_mcv) {
- .memory,
- .load_symbol,
- .load_got,
- .load_direct,
- .load_tlv,
- => .{ .base = .{ .reg = dst_info.?.addr_reg }, .disp = off },
- .indirect => |reg_off| .{
- .base = .{ .reg = reg_off.reg },
+ const dst_limb_mem: Memory = switch (dst_mcv) {
+ .memory,
+ .load_symbol,
+ .load_got,
+ .load_direct,
+ .load_tlv,
+ => .{
+ .base = .{ .reg = dst_info.?.addr_reg },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
+ .disp = off,
+ } },
+ },
+ .indirect => |reg_off| .{
+ .base = .{ .reg = reg_off.reg },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
.disp = reg_off.off + off,
- },
- .load_frame => |frame_addr| .{
- .base = .{ .frame = frame_addr.index },
+ } },
+ },
+ .load_frame => |frame_addr| .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(limb_abi_size),
.disp = frame_addr.off + off,
- },
- else => unreachable,
+ } },
},
- );
+ else => unreachable,
+ };
switch (resolved_src_mcv) {
.none,
.unreach,
@@ -9730,29 +9716,38 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M
.memory, .indirect, .load_frame => try self.asmRegisterMemory(
.{ .i_, .mul },
dst_alias,
- Memory.sib(Memory.PtrSize.fromSize(abi_size), switch (resolved_src_mcv) {
+ switch (resolved_src_mcv) {
.memory => |addr| .{
.base = .{ .reg = .ds },
- .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
- return self.asmRegisterRegister(
- .{ .i_, .mul },
- dst_alias,
- registerAlias(
- try self.copyToTmpRegister(dst_ty, resolved_src_mcv),
- abi_size,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = math.cast(i32, @as(i64, @bitCast(addr))) orelse
+ return self.asmRegisterRegister(
+ .{ .i_, .mul },
+ dst_alias,
+ registerAlias(
+ try self.copyToTmpRegister(dst_ty, resolved_src_mcv),
+ abi_size,
+ ),
),
- ),
+ } },
},
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
- .disp = reg_off.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = reg_off.off,
+ } },
},
.load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = frame_addr.off,
+ } },
},
else => unreachable,
- }),
+ },
),
}
},
@@ -9857,9 +9852,14 @@ fn genVarDbgInfo(
// .offset = -off,
//} },
.memory => |address| .{ .memory = address },
- .load_symbol => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } }, // TODO
+ .load_symbol => |sym_off| loc: {
+ assert(sym_off.off == 0);
+ break :loc .{ .linker_load = .{ .type = .direct, .sym_index = sym_off.sym } };
+ }, // TODO
.load_got => |sym_index| .{ .linker_load = .{ .type = .got, .sym_index = sym_index } },
- .load_direct => |sym_index| .{ .linker_load = .{ .type = .direct, .sym_index = sym_index } },
+ .load_direct => |sym_index| .{
+ .linker_load = .{ .type = .direct, .sym_index = sym_index },
+ },
.immediate => |x| .{ .immediate = x },
.undef => .undef,
.none => .none,
@@ -10123,19 +10123,16 @@ fn genCall(self: *Self, info: union(enum) {
try self.genSetReg(
callee_reg,
Type.usize,
- .{ .load_symbol = sym.esym_index },
+ .{ .load_symbol = .{ .sym = sym.esym_index } },
);
try self.asmRegister(.{ ._, .call }, callee_reg);
- } else {
- _ = try self.addInst(.{
- .tag = .call,
- .ops = .linker_reloc,
- .data = .{ .reloc = .{
- .atom_index = try self.owner.getSymbolIndex(self),
- .sym_index = sym.esym_index,
- } },
- });
- }
+ } else try self.asmMemory(.{ ._, .call }, .{
+ .base = .{ .reloc = .{
+ .atom_index = try self.owner.getSymbolIndex(self),
+ .sym_index = sym.esym_index,
+ } },
+ .mod = .{ .rm = .{ .size = .qword } },
+ });
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = try coff_file.getOrCreateAtomForDecl(func.owner_decl);
const sym_index = coff_file.getAtom(atom).getSymbolIndex().?;
@@ -10149,10 +10146,13 @@ fn genCall(self: *Self, info: union(enum) {
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
const atom_index = try p9.seeDecl(func.owner_decl);
const atom = p9.getAtom(atom_index);
- try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{
+ try self.asmMemory(.{ ._, .call }, .{
.base = .{ .reg = .ds },
- .disp = @intCast(atom.getOffsetTableAddress(p9)),
- }));
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = @intCast(atom.getOffsetTableAddress(p9)),
+ } },
+ });
} else unreachable;
},
.extern_func => |extern_func| {
@@ -10637,7 +10637,10 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .cmp },
registerAlias(dst_reg, op_abi_size),
- Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = .{ .reg = addr_reg } }),
+ .{
+ .base = .{ .reg = addr_reg },
+ .mod = .{ .rm = .{ .size = Memory.Size.fromSize(op_abi_size) } },
+ },
);
self.eflags_inst = inst;
@@ -10891,10 +10894,13 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
- Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
+ .{
.base = .{ .reg = addr_reg },
- .disp = some_info.off,
- }),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(some_abi_size),
+ .disp = some_info.off,
+ } },
+ },
Immediate.u(0),
);
return .{ .eflags = .e };
@@ -10904,17 +10910,23 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
- Memory.sib(Memory.PtrSize.fromSize(some_abi_size), switch (opt_mcv) {
+ switch (opt_mcv) {
.indirect => |reg_off| .{
.base = .{ .reg = reg_off.reg },
- .disp = reg_off.off + some_info.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(some_abi_size),
+ .disp = reg_off.off + some_info.off,
+ } },
},
.load_frame => |frame_addr| .{
.base = .{ .frame = frame_addr.index },
- .disp = frame_addr.off + some_info.off,
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(some_abi_size),
+ .disp = frame_addr.off + some_info.off,
+ } },
},
else => unreachable,
- }),
+ },
Immediate.u(0),
);
return .{ .eflags = .e };
@@ -10944,10 +10956,13 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
const some_abi_size: u32 = @intCast(some_info.ty.abiSize(mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
- Memory.sib(Memory.PtrSize.fromSize(some_abi_size), .{
+ .{
.base = .{ .reg = ptr_reg },
- .disp = some_info.off,
- }),
+ .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(some_abi_size),
+ .disp = some_info.off,
+ } },
+ },
Immediate.u(0),
);
@@ -11020,10 +11035,13 @@ fn isErrPtr(self: *Self, maybe_inst: ?Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCV
const err_off: u31 = @intCast(errUnionErrorOffset(eu_ty.errorUnionPayload(mod), mod));
try self.asmMemoryImmediate(
.{ ._, .cmp },
- Memory.sib(self.memPtrSize(Type.anyerror), .{
+ .{
.base = .{ .reg = ptr_reg },
- .disp = err_off,
- }),
+ .mod = .{ .rm = .{
+ .size = self.memSize(Type.anyerror),
+ .disp = err_off,
+ } },
+ },
Immediate.u(0),
);
@@ -11601,7 +11619,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
label_gop.value_ptr.target = @intCast(self.mir_instructions.len);
} else continue;
- var mnem_size: ?Memory.PtrSize = null;
+ var mnem_size: ?Memory.Size = null;
const mnem_tag = mnem: {
mnem_size = if (mem.endsWith(u8, mnem_str, "b"))
.byte
@@ -11620,7 +11638,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
mnem_size = null;
break :mnem std.meta.stringToEnum(Instruction.Mnemonic, mnem_str);
} orelse return self.fail("invalid mnemonic: '{s}'", .{mnem_str});
- if (@as(?Memory.PtrSize, switch (mnem_tag) {
+ if (@as(?Memory.Size, switch (mnem_tag) {
.fldenv, .fnstenv, .fstenv => .none,
.ldmxcsr, .stmxcsr, .vldmxcsr, .vstmxcsr => .dword,
else => null,
@@ -11685,10 +11703,13 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
if (colon) |colon_pos| {
const disp = std.fmt.parseInt(i32, op_str[colon_pos + ":".len ..], 0) catch
return self.fail("invalid displacement: '{s}'", .{op_str});
- op.* = .{ .mem = Memory.sib(
- mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}),
- .{ .base = .{ .reg = reg }, .disp = disp },
- ) };
+ op.* = .{ .mem = .{
+ .base = .{ .reg = reg },
+ .mod = .{ .rm = .{
+ .size = mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}),
+ .disp = disp,
+ } },
+ } };
} else {
if (mnem_size) |size| if (reg.bitSize() != size.bitSize())
return self.fail("invalid register size: '{s}'", .{op_str});
@@ -11712,34 +11733,45 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
.{ .reg = reg }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
- .memory => |addr| if (mem.eql(u8, modifier, "") or
- mem.eql(u8, modifier, "P"))
- .{ .mem = Memory.sib(
- mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}),
- .{ .base = .{ .reg = .ds }, .disp = @intCast(@as(i64, @bitCast(addr))) },
- ) }
+ .memory => |addr| if (mem.eql(u8, modifier, "") or mem.eql(u8, modifier, "P"))
+ .{ .mem = .{
+ .base = .{ .reg = .ds },
+ .mod = .{ .rm = .{
+ .size = mnem_size orelse
+ return self.fail("unknown size: '{s}'", .{op_str}),
+ .disp = @intCast(@as(i64, @bitCast(addr))),
+ } },
+ } }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
.indirect => |reg_off| if (mem.eql(u8, modifier, ""))
- .{ .mem = Memory.sib(
- mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}),
- .{ .base = .{ .reg = reg_off.reg }, .disp = reg_off.off },
- ) }
+ .{ .mem = .{
+ .base = .{ .reg = reg_off.reg },
+ .mod = .{ .rm = .{
+ .size = mnem_size orelse
+ return self.fail("unknown size: '{s}'", .{op_str}),
+ .disp = reg_off.off,
+ } },
+ } }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
.load_frame => |frame_addr| if (mem.eql(u8, modifier, ""))
- .{ .mem = Memory.sib(
- mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}),
- .{ .base = .{ .frame = frame_addr.index }, .disp = frame_addr.off },
- ) }
+ .{ .mem = .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{
+ .size = mnem_size orelse
+ return self.fail("unknown size: '{s}'", .{op_str}),
+ .disp = frame_addr.off,
+ } },
+ } }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
.lea_got => |sym_index| if (mem.eql(u8, modifier, "P"))
.{ .reg = try self.copyToTmpRegister(Type.usize, .{ .lea_got = sym_index }) }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
- .load_symbol => |sym_index| if (mem.eql(u8, modifier, "P"))
- .{ .reg = try self.copyToTmpRegister(Type.usize, .{ .load_symbol = sym_index }) }
+ .load_symbol => |sym_off| if (mem.eql(u8, modifier, "P"))
+ .{ .reg = try self.copyToTmpRegister(Type.usize, .{ .load_symbol = sym_off }) }
else
return self.fail("invalid modifier: '{s}'", .{modifier}),
else => return self.fail("invalid constraint: '{s}'", .{op_str}),
@@ -11774,47 +11806,61 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const scale_str = sib_it.next() orelse "";
if (index_str.len == 0 and scale_str.len > 0)
return self.fail("invalid memory operand: '{s}'", .{op_str});
- const scale = if (scale_str.len > 0) switch (std.fmt.parseInt(u4, scale_str, 10) catch
- return self.fail("invalid scale: '{s}'", .{op_str})) {
- 1, 2, 4, 8 => |scale| scale,
- else => return self.fail("invalid scale: '{s}'", .{op_str}),
- } else 1;
+ const scale: Memory.Scale = if (scale_str.len > 0)
+ switch (std.fmt.parseInt(u4, scale_str, 10) catch
+ return self.fail("invalid scale: '{s}'", .{op_str})) {
+ 1 => .@"1",
+ 2 => .@"2",
+ 4 => .@"4",
+ 8 => .@"8",
+ else => return self.fail("invalid scale: '{s}'", .{op_str}),
+ }
+ else
+ .@"1";
if (sib_it.next()) |_| return self.fail("invalid memory operand: '{s}'", .{op_str});
- op.* = .{ .mem = Memory.sib(mnem_size orelse
- return self.fail("unknown size: '{s}'", .{op_str}), .{
- .disp = if (mem.startsWith(u8, op_str[0..open], "%[") and
- mem.endsWith(u8, op_str[0..open], "]"))
- disp: {
- const colon = mem.indexOfScalarPos(u8, op_str[0..open], "%[".len, ':');
- const modifier = if (colon) |colon_pos|
- op_str[colon_pos + ":".len .. open - "]".len]
+ op.* = .{
+ .mem = .{
+ .base = if (base_str.len > 0)
+ .{ .reg = parseRegName(base_str["%%".len..]) orelse
+ return self.fail("invalid base register: '{s}'", .{base_str}) }
else
- "";
- break :disp switch (args.items[
- arg_map.get(op_str["%[".len .. colon orelse open - "]".len]) orelse
- return self.fail("no matching constraint: '{s}'", .{op_str})
- ]) {
- .immediate => |imm| if (mem.eql(u8, modifier, "") or
- mem.eql(u8, modifier, "c"))
- math.cast(i32, @as(i64, @bitCast(imm))) orelse
+ .none,
+ .mod = .{ .rm = .{
+ .size = mnem_size orelse return self.fail("unknown size: '{s}'", .{op_str}),
+ .index = if (index_str.len > 0)
+ parseRegName(index_str["%%".len..]) orelse
+ return self.fail("invalid index register: '{s}'", .{op_str})
+ else
+ .none,
+ .scale = scale,
+ .disp = if (mem.startsWith(u8, op_str[0..open], "%[") and
+ mem.endsWith(u8, op_str[0..open], "]"))
+ disp: {
+ const colon = mem.indexOfScalarPos(u8, op_str[0..open], "%[".len, ':');
+ const modifier = if (colon) |colon_pos|
+ op_str[colon_pos + ":".len .. open - "]".len]
+ else
+ "";
+ break :disp switch (args.items[
+ arg_map.get(op_str["%[".len .. colon orelse open - "]".len]) orelse
+ return self.fail("no matching constraint: '{s}'", .{op_str})
+ ]) {
+ .immediate => |imm| if (mem.eql(u8, modifier, "") or
+ mem.eql(u8, modifier, "c"))
+ math.cast(i32, @as(i64, @bitCast(imm))) orelse
+ return self.fail("invalid displacement: '{s}'", .{op_str})
+ else
+ return self.fail("invalid modifier: '{s}'", .{modifier}),
+ else => return self.fail("invalid constraint: '{s}'", .{op_str}),
+ };
+ } else if (open > 0)
+ std.fmt.parseInt(i32, op_str[0..open], 0) catch
return self.fail("invalid displacement: '{s}'", .{op_str})
else
- return self.fail("invalid modifier: '{s}'", .{modifier}),
- else => return self.fail("invalid constraint: '{s}'", .{op_str}),
- };
- } else if (open > 0)
- std.fmt.parseInt(i32, op_str[0..open], 0) catch
- return self.fail("invalid displacement: '{s}'", .{op_str})
- else
- 0,
- .base = if (base_str.len > 0) .{ .reg = parseRegName(base_str["%%".len..]) orelse
- return self.fail("invalid base register: '{s}'", .{base_str}) } else .none,
- .scale_index = if (index_str.len > 0) .{
- .index = parseRegName(index_str["%%".len..]) orelse
- return self.fail("invalid index register: '{s}'", .{op_str}),
- .scale = scale,
- } else null,
- }) };
+ 0,
+ } },
+ },
+ };
} else if (Label.isValid(.reference, op_str)) {
const anon = std.ascii.isDigit(op_str[0]);
const label_gop = try labels.getOrPut(self.gpa, op_str[0..if (anon) 1 else op_str.len]);
@@ -12534,20 +12580,23 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
),
.lea_frame => .{ .move = .{ ._, .lea } },
else => unreachable,
- }).read(self, registerAlias(dst_reg, abi_size), Memory.sib(
- self.memPtrSize(ty),
- switch (src_mcv) {
- .register_offset, .indirect => |reg_off| .{
- .base = .{ .reg = reg_off.reg },
+ }).read(self, registerAlias(dst_reg, abi_size), switch (src_mcv) {
+ .register_offset, .indirect => |reg_off| .{
+ .base = .{ .reg = reg_off.reg },
+ .mod = .{ .rm = .{
+ .size = self.memSize(ty),
.disp = reg_off.off,
- },
- .load_frame, .lea_frame => |frame_addr| .{
- .base = .{ .frame = frame_addr.index },
+ } },
+ },
+ .load_frame, .lea_frame => |frame_addr| .{
+ .base = .{ .frame = frame_addr.index },
+ .mod = .{ .rm = .{
+ .size = self.memSize(ty),
.disp = frame_addr.off,
- },
- else => unreachable,
+ } },
},
- )),
+ else => unreachable,
+ }),
.memory, .load_symbol, .load_direct, .load_got, .load_tlv => {
switch (src_mcv) {
.memory => |addr| if (math.cast(i32, @as(i64, @bitCast(addr)))) |small_addr|
@@ -12555,20 +12604,40 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
ty,
dst_reg.class(),
ty.abiAlignment(mod).check(@as(u32, @bitCast(small_addr))),
- )).read(self, registerAlias(dst_reg, abi_size), Memory.sib(
- self.memPtrSize(ty),
- .{ .base = .{ .reg = .ds }, .disp = small_addr },
- )),
+ )).read(self, registerAlias(dst_reg, abi_size), .{
+ .base = .{ .reg = .ds },
+ .mod = .{ .rm = .{
+ .size = self.memSize(ty),
+ .disp = small_addr,
+ } },
+ }),
+ .load_symbol => |sym_off| switch (dst_reg.class()) {
+ .general_purpose => {
+ assert(sym_off.off == 0);
+ try self.asmRegisterMemory(.{ ._, .mov }, registerAlias(dst_reg, abi_size), .{
+ .base = .{ .reloc = .{
+ .atom_index = try self.owner.getSymbolIndex(self),
+ .sym_index = sym_off.sym,
+ } },
+ .mod = .{ .rm = .{
+ .size = self.memSize(ty),
+ .disp = sym_off.off,
+ } },
+ });
+ return;
+ },
+ .segment, .mmx => unreachable,
+ .x87, .sse => {},
+ },
.load_direct => |sym_index| switch (dst_reg.class()) {
.general_purpose => {
- const atom_index = try self.owner.getSymbolIndex(self);
_ = try self.addInst(.{
.tag = .mov,
.ops = .direct_reloc,
.data = .{ .rx = .{
- .r1 = dst_reg.to64(),
- .payload = try self.addExtra(Mir.Reloc{
- .atom_index = atom_index,
+ .r1 = registerAlias(dst_reg, abi_size),
+ .payload = try self.addExtra(bits.Symbol{
+ .atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym_index,
}),
} },
@@ -12578,7 +12647,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.segment, .mmx => unreachable,
.x87, .sse => {},
},
- .load_symbol, .load_got, .load_tlv => {},
+ .load_got, .load_tlv => {},
else => unreachable,
}
@@ -12589,22 +12658,42 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
try (try self.moveStrategy(ty, dst_reg.class(), false)).read(
self,
registerAlias(dst_reg, abi_size),
- Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{ .reg = addr_reg } }),
+ .{
+ .base = .{ .reg = addr_reg },
+ .mod = .{ .rm = .{ .size = Memory.Size.fromSize(abi_size) } },
+ },
);
},
- .lea_symbol, .lea_direct, .lea_got => |sym_index| {
+ .lea_symbol => |sym_index| {
const atom_index = try self.owner.getSymbolIndex(self);
if (self.bin_file.cast(link.File.Elf)) |_| {
- _ = try self.addInst(.{
- .tag = .lea,
- .ops = .linker_reloc,
- .data = .{ .rx = .{
- .r1 = dst_reg.to64(),
- .payload = try self.addExtra(Mir.Reloc{
+ try self.asmRegisterMemory(
+ .{ ._, .lea },
+ dst_reg.to64(),
+ .{
+ .base = .{ .reloc = .{
.atom_index = atom_index,
- .sym_index = sym_index,
- }),
+ .sym_index = sym_index.sym,
+ } },
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = sym_index.off,
+ } },
+ },
+ );
+ } else return self.fail("TODO emit symbol sequence on {s}", .{
+ @tagName(self.bin_file.tag),
+ });
+ },
+ .lea_direct, .lea_got => |sym_index| {
+ const atom_index = try self.owner.getSymbolIndex(self);
+ if (self.bin_file.cast(link.File.Elf)) |_| {
+ try self.asmRegisterMemory(.{ ._, .lea }, dst_reg.to64(), .{
+ .base = .{ .reloc = .{
+ .atom_index = atom_index,
+ .sym_index = sym_index,
} },
+ .mod = .{ .rm = .{ .size = .qword } },
});
} else {
_ = try self.addInst(.{
@@ -12620,7 +12709,7 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
},
.data = .{ .rx = .{
.r1 = dst_reg.to64(),
- .payload = try self.addExtra(Mir.Reloc{
+ .payload = try self.addExtra(bits.Symbol{
.atom_index = atom_index,
.sym_index = sym_index,
}),
@@ -12636,14 +12725,17 @@ fn genSetReg(self: *Self, dst_reg: Register, ty: Type, src_mcv: MCValue) InnerEr
.ops = .tlv_reloc,
.data = .{ .rx = .{
.r1 = .rdi,
- .payload = try self.addExtra(Mir.Reloc{
+ .payload = try self.addExtra(bits.Symbol{
.atom_index = atom_index,
.sym_index = sym_index,
}),
} },
});
// TODO: spill registers before calling
- try self.asmMemory(.{ ._, .call }, Memory.sib(.qword, .{ .base = .{ .reg = .rdi } }));
+ try self.asmMemory(.{ ._, .call }, .{
+ .base = .{ .reg = .rdi },
+ .mod = .{ .rm = .{ .size = .qword } },
+ });
try self.genSetReg(dst_reg.to64(), Type.usize, .{ .register = .rax });
} else return self.fail("TODO emit ptr to TLV sequence on {s}", .{
@tagName(self.bin_file.tag),
@@ -12660,6 +12752,7 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.none => .{ .immediate = @bitCast(@as(i64, disp)) },
.reg => |base_reg| .{ .register_offset = .{ .reg = base_reg, .off = disp } },
.frame => |base_frame_index| .{ .lea_frame = .{ .index = base_frame_index, .off = disp } },
+ .reloc => |base_symbol| .{ .lea_symbol = .{ .sym = base_symbol.sym_index, .off = disp } },
};
switch (src_mcv) {
.none, .unreach, .dead, .reserved_frame => unreachable,
@@ -12675,7 +12768,10 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
};
try self.asmMemoryImmediate(
.{ ._, .mov },
- Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }),
+ .{ .base = base, .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = disp,
+ } } },
immediate,
);
},
@@ -12683,27 +12779,31 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
else => if (math.cast(i32, @as(i64, @bitCast(imm)))) |small| {
try self.asmMemoryImmediate(
.{ ._, .mov },
- Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = base, .disp = disp }),
+ .{ .base = base, .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(abi_size),
+ .disp = disp,
+ } } },
Immediate.s(small),
);
} else {
var offset: i32 = 0;
while (offset < abi_size) : (offset += 4) try self.asmMemoryImmediate(
.{ ._, .mov },
- Memory.sib(.dword, .{ .base = base, .disp = disp + offset }),
- if (ty.isSignedInt(mod))
- Immediate.s(
- @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)),
- )
- else
- Immediate.u(@as(
- u32,
- @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0),
- )),
+ .{ .base = base, .mod = .{ .rm = .{
+ .size = .dword,
+ .disp = disp + offset,
+ } } },
+ if (ty.isSignedInt(mod)) Immediate.s(
+ @truncate(@as(i64, @bitCast(imm)) >> (math.cast(u6, offset * 8) orelse 63)),
+ ) else Immediate.u(
+ @as(u32, @truncate(if (math.cast(u6, offset * 8)) |shift| imm >> shift else 0)),
+ ),
);
},
},
- .eflags => |cc| try self.asmSetccMemory(cc, Memory.sib(.byte, .{ .base = base, .disp = disp })),
+ .eflags => |cc| try self.asmSetccMemory(cc, .{ .base = base, .mod = .{
+ .rm = .{ .size = .byte, .disp = disp },
+ } }),
.register => |src_reg| try (try self.moveStrategy(ty, src_reg.class(), switch (base) {
.none => ty.abiAlignment(mod).check(@as(u32, @bitCast(disp))),
.reg => |reg| switch (reg) {
@@ -12713,9 +12813,13 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.frame => |frame_index| self.getFrameAddrAlignment(
.{ .index = frame_index, .off = disp },
).compare(.gte, ty.abiAlignment(mod)),
+ .reloc => false,
})).write(
self,
- Memory.sib(self.memPtrSize(ty), .{ .base = base, .disp = disp }),
+ .{ .base = base, .mod = .{ .rm = .{
+ .size = self.memSize(ty),
+ .disp = disp,
+ } } },
registerAlias(src_reg, abi_size),
),
.register_pair => |src_regs| for (src_regs, 0..) |src_reg, src_reg_i| {
@@ -12732,11 +12836,12 @@ fn genSetMem(self: *Self, base: Memory.Base, disp: i32, ty: Type, src_mcv: MCVal
.frame => |frame_index| self.getFrameAddrAlignment(
.{ .index = frame_index, .off = disp },
).compare(.gte, ty.abiAlignment(mod)),
+ .reloc => false,
},
- )).write(self, Memory.sib(
- Memory.PtrSize.fromSize(part_size),
- .{ .base = base, .disp = disp + @as(i32, @intCast(src_reg_i * 8)) },
- ), registerAlias(src_reg, part_size));
+ )).write(self, .{ .base = base, .mod = .{ .rm = .{
+ .size = Memory.Size.fromSize(part_size),
+ .disp = disp + @as(i32, @intCast(src_reg_i * 8)),
+ } } }, registerAlias(src_reg, part_size));
},
.register_overflow => |ro| switch (ty.zigTypeTag(mod)) {
.Struct => {
@@ -12834,7 +12939,7 @@ fn genExternSymbolRef(
.ops = .import_reloc,
.data = .{ .rx = .{
.r1 = .rax,
- .payload = try self.addExtra(Mir.Reloc{
+ .payload = try self.addExtra(bits.Symbol{
.atom_index = atom_index,
.sym_index = try coff_file.getGlobalSymbol(callee, lib),
}),
@@ -12871,8 +12976,12 @@ fn genLazySymbolRef(
if (self.bin_file.options.pic) {
switch (tag) {
- .lea, .call => try self.genSetReg(reg, Type.usize, .{ .load_symbol = sym.esym_index }),
- .mov => try self.genSetReg(reg, Type.usize, .{ .load_symbol = sym.esym_index }),
+ .lea, .call => try self.genSetReg(reg, Type.usize, .{
+ .load_symbol = .{ .sym = sym.esym_index },
+ }),
+ .mov => try self.genSetReg(reg, Type.usize, .{
+ .load_symbol = .{ .sym = sym.esym_index },
+ }),
else => unreachable,
}
switch (tag) {
@@ -12881,23 +12990,18 @@ fn genLazySymbolRef(
else => unreachable,
}
} else {
- const reloc = Mir.Reloc{
+ const reloc = bits.Symbol{
.atom_index = try self.owner.getSymbolIndex(self),
.sym_index = sym.esym_index,
};
switch (tag) {
- .lea, .mov => _ = try self.addInst(.{
- .tag = .mov,
- .ops = .linker_reloc,
- .data = .{ .rx = .{
- .r1 = reg.to64(),
- .payload = try self.addExtra(reloc),
- } },
+ .lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), .{
+ .base = .{ .reloc = reloc },
+ .mod = .{ .rm = .{ .size = .qword } },
}),
- .call => _ = try self.addInst(.{
- .tag = .call,
- .ops = .linker_reloc,
- .data = .{ .reloc = reloc },
+ .call => try self.asmMemory(.{ ._, .call }, .{
+ .base = .{ .reloc = reloc },
+ .mod = .{ .rm = .{ .size = .qword } },
}),
else => unreachable,
}
@@ -12908,8 +13012,13 @@ fn genLazySymbolRef(
var atom = p9_file.getAtom(atom_index);
_ = atom.getOrCreateOffsetTableEntry(p9_file);
const got_addr = atom.getOffsetTableAddress(p9_file);
- const got_mem =
- Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = @intCast(got_addr) });
+ const got_mem: Memory = .{
+ .base = .{ .reg = .ds },
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = @intCast(got_addr),
+ } },
+ };
switch (tag) {
.lea, .mov => try self.asmRegisterMemory(.{ ._, .mov }, reg.to64(), got_mem),
.call => try self.asmMemory(.{ ._, .call }, got_mem),
@@ -13255,19 +13364,20 @@ fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
defer if (new_lock) |lock| self.register_manager.unlockReg(lock);
const ptr_mcv = try self.resolveInst(extra.ptr);
- const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
- const ptr_mem = switch (ptr_mcv) {
- .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size),
- else => Memory.sib(ptr_size, .{
+ const mem_size = Memory.Size.fromSize(val_abi_size);
+ const ptr_mem: Memory = switch (ptr_mcv) {
+ .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(mem_size),
+ else => .{
.base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) },
- }),
+ .mod = .{ .rm = .{ .size = mem_size } },
+ },
};
- switch (ptr_mem) {
- .sib, .rip => {},
- .moffs => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}),
+ switch (ptr_mem.mod) {
+ .rm => {},
+ .off => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}),
}
- const ptr_lock = switch (ptr_mem.base()) {
- .none, .frame => null,
+ const ptr_lock = switch (ptr_mem.base) {
+ .none, .frame, .reloc => null,
.reg => |reg| self.register_manager.lockReg(reg),
};
defer if (ptr_lock) |lock| self.register_manager.unlockReg(lock);
@@ -13320,19 +13430,20 @@ fn atomicOp(
defer if (val_lock) |lock| self.register_manager.unlockReg(lock);
const val_abi_size: u32 = @intCast(val_ty.abiSize(mod));
- const ptr_size = Memory.PtrSize.fromSize(val_abi_size);
- const ptr_mem = switch (ptr_mcv) {
- .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(ptr_size),
- else => Memory.sib(ptr_size, .{
+ const mem_size = Memory.Size.fromSize(val_abi_size);
+ const ptr_mem: Memory = switch (ptr_mcv) {
+ .immediate, .register, .register_offset, .lea_frame => ptr_mcv.deref().mem(mem_size),
+ else => .{
.base = .{ .reg = try self.copyToTmpRegister(ptr_ty, ptr_mcv) },
- }),
+ .mod = .{ .rm = .{ .size = mem_size } },
+ },
};
- switch (ptr_mem) {
- .sib, .rip => {},
- .moffs => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}),
+ switch (ptr_mem.mod) {
+ .rm => {},
+ .off => return self.fail("TODO airCmpxchg with {s}", .{@tagName(ptr_mcv)}),
}
- const mem_lock = switch (ptr_mem.base()) {
- .none, .frame => null,
+ const mem_lock = switch (ptr_mem.base) {
+ .none, .frame, .reloc => null,
.reg => |reg| self.register_manager.lockReg(reg),
};
defer if (mem_lock) |lock| self.register_manager.unlockReg(lock);
@@ -13440,7 +13551,7 @@ fn atomicOp(
.memory, .indirect, .load_frame => try self.asmCmovccRegisterMemory(
cc,
registerAlias(tmp_reg, cmov_abi_size),
- val_mcv.mem(Memory.PtrSize.fromSize(cmov_abi_size)),
+ val_mcv.mem(Memory.Size.fromSize(cmov_abi_size)),
),
else => {
const val_reg = try self.copyToTmpRegister(val_ty, val_mcv);
@@ -13461,16 +13572,24 @@ fn atomicOp(
_ = try self.asmJccReloc(.ne, loop);
return if (unused) .unreach else .{ .register = .rax };
} else {
- try self.asmRegisterMemory(.{ ._, .mov }, .rax, Memory.sib(.qword, .{
- .base = ptr_mem.sib.base,
- .scale_index = ptr_mem.scaleIndex(),
- .disp = ptr_mem.sib.disp + 0,
- }));
- try self.asmRegisterMemory(.{ ._, .mov }, .rdx, Memory.sib(.qword, .{
- .base = ptr_mem.sib.base,
- .scale_index = ptr_mem.scaleIndex(),
- .disp = ptr_mem.sib.disp + 8,
- }));
+ try self.asmRegisterMemory(.{ ._, .mov }, .rax, .{
+ .base = ptr_mem.base,
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .index = ptr_mem.mod.rm.index,
+ .scale = ptr_mem.mod.rm.scale,
+ .disp = ptr_mem.mod.rm.disp + 0,
+ } },
+ });
+ try self.asmRegisterMemory(.{ ._, .mov }, .rdx, .{
+ .base = ptr_mem.base,
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .index = ptr_mem.mod.rm.index,
+ .scale = ptr_mem.mod.rm.scale,
+ .disp = ptr_mem.mod.rm.disp + 8,
+ } },
+ });
const loop: Mir.Inst.Index = @intCast(self.mir_instructions.len);
const val_mem_mcv: MCValue = switch (val_mcv) {
.memory, .indirect, .load_frame => val_mcv,
@@ -13524,22 +13643,20 @@ fn atomicOp(
if (unused) return .unreach;
const dst_mcv = try self.allocTempRegOrMem(val_ty, false);
- try self.asmMemoryRegister(
- .{ ._, .mov },
- Memory.sib(.qword, .{
- .base = .{ .frame = dst_mcv.load_frame.index },
+ try self.asmMemoryRegister(.{ ._, .mov }, .{
+ .base = .{ .frame = dst_mcv.load_frame.index },
+ .mod = .{ .rm = .{
+ .size = .qword,
.disp = dst_mcv.load_frame.off + 0,
- }),
- .rax,
- );
- try self.asmMemoryRegister(
- .{ ._, .mov },
- Memory.sib(.qword, .{
- .base = .{ .frame = dst_mcv.load_frame.index },
+ } },
+ }, .rax);
+ try self.asmMemoryRegister(.{ ._, .mov }, .{
+ .base = .{ .frame = dst_mcv.load_frame.index },
+ .mod = .{ .rm = .{
+ .size = .qword,
.disp = dst_mcv.load_frame.off + 8,
- }),
- .rdx,
- );
+ } },
+ }, .rdx);
return dst_mcv;
},
.libcall => return self.fail("TODO implement x86 atomic libcall", .{}),
@@ -13845,55 +13962,74 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(
.{ ._, .mov },
start_reg.to32(),
- Memory.sib(.dword, .{
+ .{
.base = .{ .reg = addr_reg.to64() },
- .scale_index = .{ .scale = 4, .index = err_reg.to64() },
- .disp = 4,
- }),
+ .mod = .{ .rm = .{
+ .size = .dword,
+ .index = err_reg.to64(),
+ .scale = .@"4",
+ .disp = 4,
+ } },
+ },
);
try self.asmRegisterMemory(
.{ ._, .mov },
end_reg.to32(),
- Memory.sib(.dword, .{
+ .{
.base = .{ .reg = addr_reg.to64() },
- .scale_index = .{ .scale = 4, .index = err_reg.to64() },
- .disp = 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .dword,
+ .index = err_reg.to64(),
+ .scale = .@"4",
+ .disp = 8,
+ } },
+ },
);
try self.asmRegisterRegister(.{ ._, .sub }, end_reg.to32(), start_reg.to32());
try self.asmRegisterMemory(
.{ ._, .lea },
start_reg.to64(),
- Memory.sib(.byte, .{
+ .{
.base = .{ .reg = addr_reg.to64() },
- .scale_index = .{ .scale = 1, .index = start_reg.to64() },
- .disp = 0,
- }),
+ .mod = .{ .rm = .{
+ .size = .dword,
+ .index = start_reg.to64(),
+ } },
+ },
);
try self.asmRegisterMemory(
.{ ._, .lea },
end_reg.to32(),
- Memory.sib(.byte, .{
+ .{
.base = .{ .reg = end_reg.to64() },
- .disp = -1,
- }),
+ .mod = .{ .rm = .{
+ .size = .byte,
+ .disp = -1,
+ } },
+ },
);
const dst_mcv = try self.allocRegOrMem(inst, false);
try self.asmMemoryRegister(
.{ ._, .mov },
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_mcv.load_frame.index },
- .disp = dst_mcv.load_frame.off,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_mcv.load_frame.off,
+ } },
+ },
start_reg.to64(),
);
try self.asmMemoryRegister(
.{ ._, .mov },
- Memory.sib(.qword, .{
+ .{
.base = .{ .frame = dst_mcv.load_frame.index },
- .disp = dst_mcv.load_frame.off + 8,
- }),
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = dst_mcv.load_frame.off + 8,
+ } },
+ },
end_reg.to64(),
);
@@ -14130,7 +14266,7 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
.Or => {
if (operand_mcv.isMemory()) try self.asmMemoryImmediate(
.{ ._, .@"test" },
- operand_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
+ operand_mcv.mem(Memory.Size.fromSize(abi_size)),
Immediate.u(mask),
) else {
const operand_reg = registerAlias(if (operand_mcv.isRegister())
@@ -14522,7 +14658,7 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
mir_tag,
mop1_reg,
mop2_reg,
- mops[2].mem(Memory.PtrSize.fromSize(abi_size)),
+ mops[2].mem(Memory.Size.fromSize(abi_size)),
);
break :result mops[0];
};
@@ -14633,24 +14769,32 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const mem_reloc = try self.asmJccReloc(.ae, undefined);
try self.genSetReg(addr_reg, ptr_anyopaque_ty, reg_save_area);
- if (!unused)
- try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, Memory.sib(.qword, .{
- .base = .{ .reg = addr_reg },
- .scale_index = .{ .scale = 1, .index = offset_reg.to64() },
- }));
- try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, Memory.sib(.qword, .{
+ if (!unused) try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, .{
+ .base = .{ .reg = addr_reg },
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .index = offset_reg.to64(),
+ } },
+ });
+ try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, .{
.base = .{ .reg = offset_reg.to64() },
- .disp = 8,
- }));
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = 8,
+ } },
+ });
try self.genCopy(Type.c_uint, gp_offset, .{ .register = offset_reg });
const done_reloc = try self.asmJmpReloc(undefined);
try self.performReloc(mem_reloc);
try self.genSetReg(addr_reg, ptr_anyopaque_ty, overflow_arg_area);
- try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), Memory.sib(.qword, .{
+ try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), .{
.base = .{ .reg = addr_reg },
- .disp = @intCast(@max(promote_ty.abiSize(mod), 8)),
- }));
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = @intCast(@max(promote_ty.abiSize(mod), 8)),
+ } },
+ });
try self.genCopy(
ptr_anyopaque_ty,
overflow_arg_area,
@@ -14672,24 +14816,32 @@ fn airVaArg(self: *Self, inst: Air.Inst.Index) !void {
const mem_reloc = try self.asmJccReloc(.ae, undefined);
try self.genSetReg(addr_reg, ptr_anyopaque_ty, reg_save_area);
- if (!unused)
- try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, Memory.sib(.qword, .{
- .base = .{ .reg = addr_reg },
- .scale_index = .{ .scale = 1, .index = offset_reg.to64() },
- }));
- try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, Memory.sib(.qword, .{
+ if (!unused) try self.asmRegisterMemory(.{ ._, .lea }, addr_reg, .{
+ .base = .{ .reg = addr_reg },
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .index = offset_reg.to64(),
+ } },
+ });
+ try self.asmRegisterMemory(.{ ._, .lea }, offset_reg, .{
.base = .{ .reg = offset_reg.to64() },
- .disp = 16,
- }));
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = 16,
+ } },
+ });
try self.genCopy(Type.c_uint, fp_offset, .{ .register = offset_reg });
const done_reloc = try self.asmJmpReloc(undefined);
try self.performReloc(mem_reloc);
try self.genSetReg(addr_reg, ptr_anyopaque_ty, overflow_arg_area);
- try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), Memory.sib(.qword, .{
+ try self.asmRegisterMemory(.{ ._, .lea }, offset_reg.to64(), .{
.base = .{ .reg = addr_reg },
- .disp = @intCast(@max(promote_ty.abiSize(mod), 8)),
- }));
+ .mod = .{ .rm = .{
+ .size = .qword,
+ .disp = @intCast(@max(promote_ty.abiSize(mod), 8)),
+ } },
+ });
try self.genCopy(
ptr_anyopaque_ty,
overflow_arg_area,
@@ -14838,7 +14990,7 @@ fn genTypedValue(self: *Self, arg_tv: TypedValue) InnerError!MCValue {
.undef => .undef,
.immediate => |imm| .{ .immediate = imm },
.memory => |addr| .{ .memory = addr },
- .load_symbol => |sym_index| .{ .load_symbol = sym_index },
+ .load_symbol => |sym_index| .{ .load_symbol = .{ .sym = sym_index } },
.load_direct => |sym_index| .{ .load_direct = sym_index },
.load_got => |sym_index| .{ .lea_got = sym_index },
.load_tlv => |sym_index| .{ .lea_tlv = sym_index },
@@ -15182,11 +15334,11 @@ fn registerAlias(reg: Register, size_bytes: u32) Register {
};
}
-fn memPtrSize(self: *Self, ty: Type) Memory.PtrSize {
+fn memSize(self: *Self, ty: Type) Memory.Size {
const mod = self.bin_file.options.module.?;
return switch (ty.zigTypeTag(mod)) {
- .Float => Memory.PtrSize.fromBitSize(ty.floatBits(self.target.*)),
- else => Memory.PtrSize.fromSize(@intCast(ty.abiSize(mod))),
+ .Float => Memory.Size.fromBitSize(ty.floatBits(self.target.*)),
+ else => Memory.Size.fromSize(@intCast(ty.abiSize(mod))),
};
}
src/arch/x86_64/Disassembler.zig
@@ -11,7 +11,7 @@ const Encoding = @import("Encoding.zig");
const Immediate = bits.Immediate;
const Instruction = encoder.Instruction;
const LegacyPrefixes = encoder.LegacyPrefixes;
-const Memory = bits.Memory;
+const Memory = Instruction.Memory;
const Register = bits.Register;
const Rex = encoder.Rex;
src/arch/x86_64/Emit.zig
@@ -1,7 +1,6 @@
//! This file contains the functionality for emitting x86_64 MIR as machine code
lower: Lower,
-bin_file: *link.File,
debug_output: DebugInfoOutput,
code: *std.ArrayList(u8),
@@ -41,7 +40,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.offset = end_offset - 4,
.length = @intCast(end_offset - start_offset),
}),
- .linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
+ .linker_extern_fn => |symbol| if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
// Add relocation to the decl.
const atom_ptr = elf_file.symbol(symbol.atom_index).atom(elf_file).?;
try atom_ptr.addReloc(elf_file, .{
@@ -49,9 +48,10 @@ pub fn emitMir(emit: *Emit) Error!void {
.r_info = (@as(u64, @intCast(symbol.sym_index)) << 32) | std.elf.R_X86_64_PLT32,
.r_addend = -4,
});
- } else if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
+ } else if (emit.lower.bin_file.cast(link.File.MachO)) |macho_file| {
// Add relocation to the decl.
- const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = symbol.atom_index }).?;
+ const atom_index =
+ macho_file.getAtomIndexForSymbol(.{ .sym_index = symbol.atom_index }).?;
const target = macho_file.getGlobalByIndex(symbol.sym_index);
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = .branch,
@@ -61,7 +61,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.pcrel = true,
.length = 2,
});
- } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
+ } else if (emit.lower.bin_file.cast(link.File.Coff)) |coff_file| {
// Add relocation to the decl.
const atom_index = coff_file.getAtomIndexForSymbol(
.{ .sym_index = symbol.atom_index, .file = null },
@@ -76,12 +76,12 @@ pub fn emitMir(emit: *Emit) Error!void {
.length = 2,
});
} else return emit.fail("TODO implement extern reloc for {s}", .{
- @tagName(emit.bin_file.tag),
+ @tagName(emit.lower.bin_file.tag),
}),
- .linker_reloc => |data| if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
+ .linker_reloc => |data| if (emit.lower.bin_file.cast(link.File.Elf)) |elf_file| {
const atom = elf_file.symbol(data.atom_index).atom(elf_file).?;
const sym = elf_file.symbol(elf_file.zigModulePtr().symbol(data.sym_index));
- if (emit.bin_file.options.pic) {
+ if (emit.lower.bin_file.options.pic) {
const r_type: u32 = if (sym.flags.has_zig_got)
link.File.Elf.R_X86_64_ZIG_GOTPCREL
else if (sym.flags.needs_got)
@@ -111,10 +111,11 @@ pub fn emitMir(emit: *Emit) Error!void {
.linker_direct,
.linker_import,
.linker_tlv,
- => |symbol| if (emit.bin_file.cast(link.File.Elf)) |_| {
+ => |symbol| if (emit.lower.bin_file.cast(link.File.Elf)) |_| {
unreachable;
- } else if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
- const atom_index = macho_file.getAtomIndexForSymbol(.{ .sym_index = symbol.atom_index }).?;
+ } else if (emit.lower.bin_file.cast(link.File.MachO)) |macho_file| {
+ const atom_index =
+ macho_file.getAtomIndexForSymbol(.{ .sym_index = symbol.atom_index }).?;
try link.File.MachO.Atom.addRelocation(macho_file, atom_index, .{
.type = switch (lowered_relocs[0].target) {
.linker_got => .got,
@@ -128,7 +129,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.pcrel = true,
.length = 2,
});
- } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
+ } else if (emit.lower.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = coff_file.getAtomIndexForSymbol(.{
.sym_index = symbol.atom_index,
.file = null,
@@ -152,7 +153,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.pcrel = true,
.length = 2,
});
- } else if (emit.bin_file.cast(link.File.Plan9)) |p9_file| {
+ } else if (emit.lower.bin_file.cast(link.File.Plan9)) |p9_file| {
const atom_index = symbol.atom_index;
try p9_file.addReloc(atom_index, .{ // TODO we may need to add a .type field to the relocs if they are .linker_got instead of just .linker_direct
.target = symbol.sym_index, // we set sym_index to just be the atom index
@@ -161,7 +162,7 @@ pub fn emitMir(emit: *Emit) Error!void {
.type = .pcrel,
});
} else return emit.fail("TODO implement linker reloc for {s}", .{
- @tagName(emit.bin_file.tag),
+ @tagName(emit.lower.bin_file.tag),
}),
};
}
src/arch/x86_64/encoder.zig
@@ -6,9 +6,10 @@ const testing = std.testing;
const bits = @import("bits.zig");
const Encoding = @import("Encoding.zig");
+const FrameIndex = bits.FrameIndex;
const Immediate = bits.Immediate;
-const Memory = bits.Memory;
const Register = bits.Register;
+const Symbol = bits.Symbol;
pub const Instruction = struct {
prefix: Prefix = .none,
@@ -27,6 +28,97 @@ pub const Instruction = struct {
repnz,
};
+ pub const Memory = union(enum) {
+ sib: Sib,
+ rip: Rip,
+ moffs: Moffs,
+
+ pub const Base = bits.Memory.Base;
+
+ pub const ScaleIndex = struct {
+ scale: u4,
+ index: Register,
+
+ const none = ScaleIndex{ .scale = 0, .index = undefined };
+ };
+
+ pub const PtrSize = bits.Memory.Size;
+
+ pub const Sib = struct {
+ ptr_size: PtrSize,
+ base: Base,
+ scale_index: ScaleIndex,
+ disp: i32,
+ };
+
+ pub const Rip = struct {
+ ptr_size: PtrSize,
+ disp: i32,
+ };
+
+ pub const Moffs = struct {
+ seg: Register,
+ offset: u64,
+ };
+
+ pub fn moffs(reg: Register, offset: u64) Memory {
+ assert(reg.class() == .segment);
+ return .{ .moffs = .{ .seg = reg, .offset = offset } };
+ }
+
+ pub fn sib(ptr_size: PtrSize, args: struct {
+ disp: i32 = 0,
+ base: Base = .none,
+ scale_index: ?ScaleIndex = null,
+ }) Memory {
+ if (args.scale_index) |si| assert(std.math.isPowerOfTwo(si.scale));
+ return .{ .sib = .{
+ .base = args.base,
+ .disp = args.disp,
+ .ptr_size = ptr_size,
+ .scale_index = if (args.scale_index) |si| si else ScaleIndex.none,
+ } };
+ }
+
+ pub fn rip(ptr_size: PtrSize, disp: i32) Memory {
+ return .{ .rip = .{ .ptr_size = ptr_size, .disp = disp } };
+ }
+
+ pub fn isSegmentRegister(mem: Memory) bool {
+ return switch (mem) {
+ .moffs => true,
+ .rip => false,
+ .sib => |s| switch (s.base) {
+ .none, .frame, .reloc => false,
+ .reg => |reg| reg.class() == .segment,
+ },
+ };
+ }
+
+ pub fn base(mem: Memory) Base {
+ return switch (mem) {
+ .moffs => |m| .{ .reg = m.seg },
+ .sib => |s| s.base,
+ .rip => .none,
+ };
+ }
+
+ pub fn scaleIndex(mem: Memory) ?ScaleIndex {
+ return switch (mem) {
+ .moffs, .rip => null,
+ .sib => |s| if (s.scale_index.scale > 0) s.scale_index else null,
+ };
+ }
+
+ pub fn bitSize(mem: Memory) u64 {
+ return switch (mem) {
+ .rip => |r| r.ptr_size.bitSize(),
+ .sib => |s| s.ptr_size.bitSize(),
+ .moffs => 64,
+ };
+ }
+ };
+
pub const Operand = union(enum) {
none,
reg: Register,
@@ -125,8 +217,8 @@ pub const Instruction = struct {
try writer.print("{s}", .{@tagName(reg)});
any = true;
},
- .frame => |frame| {
- try writer.print("{}", .{frame});
+ inline .frame, .reloc => |payload| {
+ try writer.print("{}", .{payload});
any = true;
},
}
@@ -498,7 +590,11 @@ pub const Instruction = struct {
}
}
},
- .frame => if (@TypeOf(encoder).options.allow_frame_loc) {
+ .frame => if (@TypeOf(encoder).options.allow_frame_locs) {
+ try encoder.modRm_indirectDisp32(operand_enc, undefined);
+ try encoder.disp32(undefined);
+ } else return error.CannotEncode,
+ .reloc => if (@TypeOf(encoder).options.allow_symbols) {
try encoder.modRm_indirectDisp32(operand_enc, undefined);
try encoder.disp32(undefined);
} else return error.CannotEncode,
@@ -570,7 +666,7 @@ pub const LegacyPrefixes = packed struct {
}
};
-pub const Options = struct { allow_frame_loc: bool = false };
+pub const Options = struct { allow_frame_locs: bool = false, allow_symbols: bool = false };
fn Encoder(comptime T: type, comptime opts: Options) type {
return struct {
@@ -1085,7 +1181,7 @@ test "lower MI encoding" {
try expectEqualHexStrings("\x49\xC7\xC4\x00\x10\x00\x00", enc.code(), "mov r12, 0x1000");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.byte, .{ .base = .r12 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .r12 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x41\xC6\x04\x24\x10", enc.code(), "mov BYTE PTR [r12], 0x10");
@@ -1109,13 +1205,13 @@ test "lower MI encoding" {
try expectEqualHexStrings("\x48\xc7\xc0\x10\x00\x00\x00", enc.code(), "mov rax, 0x10");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .r11 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .r11 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x41\xc7\x03\x10\x00\x00\x00", enc.code(), "mov DWORD PTR [r11], 0x10");
try enc.encode(.mov, &.{
- .{ .mem = Memory.rip(.qword, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.qword, 0x10) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings(
@@ -1125,25 +1221,25 @@ test "lower MI encoding" {
);
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .rbp, .disp = -8 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .rbp, .disp = -8 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x48\xc7\x45\xf8\x10\x00\x00\x00", enc.code(), "mov QWORD PTR [rbp - 8], 0x10");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.word, .{ .base = .rbp, .disp = -2 }) },
+ .{ .mem = Instruction.Memory.sib(.word, .{ .base = .rbp, .disp = -2 }) },
.{ .imm = Immediate.s(-16) },
});
try expectEqualHexStrings("\x66\xC7\x45\xFE\xF0\xFF", enc.code(), "mov WORD PTR [rbp - 2], -16");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.byte, .{ .base = .rbp, .disp = -1 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .rbp, .disp = -1 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\xC6\x45\xFF\x10", enc.code(), "mov BYTE PTR [rbp - 1], 0x10");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = .ds,
.disp = 0x10000000,
.scale_index = .{ .scale = 2, .index = .rcx },
@@ -1157,13 +1253,13 @@ test "lower MI encoding" {
);
try enc.encode(.adc, &.{
- .{ .mem = Memory.sib(.byte, .{ .base = .rbp, .disp = -0x10 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .rbp, .disp = -0x10 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x80\x55\xF0\x10", enc.code(), "adc BYTE PTR [rbp - 0x10], 0x10");
try enc.encode(.adc, &.{
- .{ .mem = Memory.rip(.qword, 0) },
+ .{ .mem = Instruction.Memory.rip(.qword, 0) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x48\x83\x15\x00\x00\x00\x00\x10", enc.code(), "adc QWORD PTR [rip], 0x10");
@@ -1175,7 +1271,7 @@ test "lower MI encoding" {
try expectEqualHexStrings("\x48\x83\xD0\x10", enc.code(), "adc rax, 0x10");
try enc.encode(.add, &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .rdx, .disp = -8 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .rdx, .disp = -8 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings("\x83\x42\xF8\x10", enc.code(), "add DWORD PTR [rdx - 8], 0x10");
@@ -1187,13 +1283,13 @@ test "lower MI encoding" {
try expectEqualHexStrings("\x48\x83\xC0\x10", enc.code(), "add rax, 0x10");
try enc.encode(.add, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .rbp, .disp = -0x10 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .rbp, .disp = -0x10 }) },
.{ .imm = Immediate.s(-0x10) },
});
try expectEqualHexStrings("\x48\x83\x45\xF0\xF0", enc.code(), "add QWORD PTR [rbp - 0x10], -0x10");
try enc.encode(.@"and", &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .ds, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .ds, .disp = 0x10000000 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings(
@@ -1203,7 +1299,7 @@ test "lower MI encoding" {
);
try enc.encode(.@"and", &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .es, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .es, .disp = 0x10000000 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings(
@@ -1213,7 +1309,7 @@ test "lower MI encoding" {
);
try enc.encode(.@"and", &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .r12, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .r12, .disp = 0x10000000 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings(
@@ -1223,7 +1319,7 @@ test "lower MI encoding" {
);
try enc.encode(.sub, &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .r11, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .r11, .disp = 0x10000000 }) },
.{ .imm = Immediate.u(0x10) },
});
try expectEqualHexStrings(
@@ -1238,25 +1334,25 @@ test "lower RM encoding" {
try enc.encode(.mov, &.{
.{ .reg = .rax },
- .{ .mem = Memory.sib(.qword, .{ .base = .r11 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .r11 }) },
});
try expectEqualHexStrings("\x49\x8b\x03", enc.code(), "mov rax, QWORD PTR [r11]");
try enc.encode(.mov, &.{
.{ .reg = .rbx },
- .{ .mem = Memory.sib(.qword, .{ .base = .ds, .disp = 0x10 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .ds, .disp = 0x10 }) },
});
try expectEqualHexStrings("\x48\x8B\x1C\x25\x10\x00\x00\x00", enc.code(), "mov rbx, QWORD PTR ds:0x10");
try enc.encode(.mov, &.{
.{ .reg = .rax },
- .{ .mem = Memory.sib(.qword, .{ .base = .rbp, .disp = -4 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .rbp, .disp = -4 }) },
});
try expectEqualHexStrings("\x48\x8B\x45\xFC", enc.code(), "mov rax, QWORD PTR [rbp - 4]");
try enc.encode(.mov, &.{
.{ .reg = .rax },
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = .rbp,
.scale_index = .{ .scale = 1, .index = .rcx },
.disp = -8,
@@ -1266,7 +1362,7 @@ test "lower RM encoding" {
try enc.encode(.mov, &.{
.{ .reg = .eax },
- .{ .mem = Memory.sib(.dword, .{
+ .{ .mem = Instruction.Memory.sib(.dword, .{
.base = .rbp,
.scale_index = .{ .scale = 4, .index = .rdx },
.disp = -4,
@@ -1276,7 +1372,7 @@ test "lower RM encoding" {
try enc.encode(.mov, &.{
.{ .reg = .rax },
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = .rbp,
.scale_index = .{ .scale = 8, .index = .rcx },
.disp = -8,
@@ -1286,7 +1382,7 @@ test "lower RM encoding" {
try enc.encode(.mov, &.{
.{ .reg = .r8b },
- .{ .mem = Memory.sib(.byte, .{
+ .{ .mem = Instruction.Memory.sib(.byte, .{
.base = .rsi,
.scale_index = .{ .scale = 1, .index = .rcx },
.disp = -24,
@@ -1302,7 +1398,7 @@ test "lower RM encoding" {
try expectEqualHexStrings("\x48\x8C\xC8", enc.code(), "mov rax, cs");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .rbp, .disp = -16 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .rbp, .disp = -16 }) },
.{ .reg = .fs },
});
try expectEqualHexStrings("\x48\x8C\x65\xF0", enc.code(), "mov QWORD PTR [rbp - 16], fs");
@@ -1314,7 +1410,7 @@ test "lower RM encoding" {
try expectEqualHexStrings("\x66\x41\x8C\xCC", enc.code(), "mov r12w, cs");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.word, .{ .base = .rbp, .disp = -16 }) },
+ .{ .mem = Instruction.Memory.sib(.word, .{ .base = .rbp, .disp = -16 }) },
.{ .reg = .fs },
});
try expectEqualHexStrings("\x66\x8C\x65\xF0", enc.code(), "mov WORD PTR [rbp - 16], fs");
@@ -1339,19 +1435,19 @@ test "lower RM encoding" {
try enc.encode(.movsx, &.{
.{ .reg = .eax },
- .{ .mem = Memory.sib(.word, .{ .base = .rbp }) },
+ .{ .mem = Instruction.Memory.sib(.word, .{ .base = .rbp }) },
});
try expectEqualHexStrings("\x0F\xBF\x45\x00", enc.code(), "movsx eax, BYTE PTR [rbp]");
try enc.encode(.movsx, &.{
.{ .reg = .eax },
- .{ .mem = Memory.sib(.byte, .{ .scale_index = .{ .index = .rax, .scale = 2 } }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .scale_index = .{ .index = .rax, .scale = 2 } }) },
});
try expectEqualHexStrings("\x0F\xBE\x04\x45\x00\x00\x00\x00", enc.code(), "movsx eax, BYTE PTR [rax * 2]");
try enc.encode(.movsx, &.{
.{ .reg = .ax },
- .{ .mem = Memory.rip(.byte, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.byte, 0x10) },
});
try expectEqualHexStrings("\x66\x0F\xBE\x05\x10\x00\x00\x00", enc.code(), "movsx ax, BYTE PTR [rip + 0x10]");
@@ -1369,37 +1465,37 @@ test "lower RM encoding" {
try enc.encode(.lea, &.{
.{ .reg = .rax },
- .{ .mem = Memory.rip(.qword, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.qword, 0x10) },
});
try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", enc.code(), "lea rax, QWORD PTR [rip + 0x10]");
try enc.encode(.lea, &.{
.{ .reg = .rax },
- .{ .mem = Memory.rip(.dword, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.dword, 0x10) },
});
try expectEqualHexStrings("\x48\x8D\x05\x10\x00\x00\x00", enc.code(), "lea rax, DWORD PTR [rip + 0x10]");
try enc.encode(.lea, &.{
.{ .reg = .eax },
- .{ .mem = Memory.rip(.dword, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.dword, 0x10) },
});
try expectEqualHexStrings("\x8D\x05\x10\x00\x00\x00", enc.code(), "lea eax, DWORD PTR [rip + 0x10]");
try enc.encode(.lea, &.{
.{ .reg = .eax },
- .{ .mem = Memory.rip(.word, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.word, 0x10) },
});
try expectEqualHexStrings("\x8D\x05\x10\x00\x00\x00", enc.code(), "lea eax, WORD PTR [rip + 0x10]");
try enc.encode(.lea, &.{
.{ .reg = .ax },
- .{ .mem = Memory.rip(.byte, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.byte, 0x10) },
});
try expectEqualHexStrings("\x66\x8D\x05\x10\x00\x00\x00", enc.code(), "lea ax, BYTE PTR [rip + 0x10]");
try enc.encode(.lea, &.{
.{ .reg = .rsi },
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = .rbp,
.scale_index = .{ .scale = 1, .index = .rcx },
}) },
@@ -1408,31 +1504,31 @@ test "lower RM encoding" {
try enc.encode(.add, &.{
.{ .reg = .r11 },
- .{ .mem = Memory.sib(.qword, .{ .base = .ds, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .ds, .disp = 0x10000000 }) },
});
try expectEqualHexStrings("\x4C\x03\x1C\x25\x00\x00\x00\x10", enc.code(), "add r11, QWORD PTR ds:0x10000000");
try enc.encode(.add, &.{
.{ .reg = .r12b },
- .{ .mem = Memory.sib(.byte, .{ .base = .ds, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .ds, .disp = 0x10000000 }) },
});
try expectEqualHexStrings("\x44\x02\x24\x25\x00\x00\x00\x10", enc.code(), "add r11b, BYTE PTR ds:0x10000000");
try enc.encode(.add, &.{
.{ .reg = .r12b },
- .{ .mem = Memory.sib(.byte, .{ .base = .fs, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .fs, .disp = 0x10000000 }) },
});
try expectEqualHexStrings("\x64\x44\x02\x24\x25\x00\x00\x00\x10", enc.code(), "add r11b, BYTE PTR fs:0x10000000");
try enc.encode(.sub, &.{
.{ .reg = .r11 },
- .{ .mem = Memory.sib(.qword, .{ .base = .r13, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .r13, .disp = 0x10000000 }) },
});
try expectEqualHexStrings("\x4D\x2B\x9D\x00\x00\x00\x10", enc.code(), "sub r11, QWORD PTR [r13 + 0x10000000]");
try enc.encode(.sub, &.{
.{ .reg = .r11 },
- .{ .mem = Memory.sib(.qword, .{ .base = .r12, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .r12, .disp = 0x10000000 }) },
});
try expectEqualHexStrings("\x4D\x2B\x9C\x24\x00\x00\x00\x10", enc.code(), "sub r11, QWORD PTR [r12 + 0x10000000]");
@@ -1455,7 +1551,7 @@ test "lower RMI encoding" {
try enc.encode(.imul, &.{
.{ .reg = .r11 },
- .{ .mem = Memory.rip(.qword, -16) },
+ .{ .mem = Instruction.Memory.rip(.qword, -16) },
.{ .imm = Immediate.s(-1024) },
});
try expectEqualHexStrings(
@@ -1466,7 +1562,7 @@ test "lower RMI encoding" {
try enc.encode(.imul, &.{
.{ .reg = .bx },
- .{ .mem = Memory.sib(.word, .{ .base = .rbp, .disp = -16 }) },
+ .{ .mem = Instruction.Memory.sib(.word, .{ .base = .rbp, .disp = -16 }) },
.{ .imm = Immediate.s(-1024) },
});
try expectEqualHexStrings(
@@ -1477,7 +1573,7 @@ test "lower RMI encoding" {
try enc.encode(.imul, &.{
.{ .reg = .bx },
- .{ .mem = Memory.sib(.word, .{ .base = .rbp, .disp = -16 }) },
+ .{ .mem = Instruction.Memory.sib(.word, .{ .base = .rbp, .disp = -16 }) },
.{ .imm = Immediate.u(1024) },
});
try expectEqualHexStrings(
@@ -1497,19 +1593,19 @@ test "lower MR encoding" {
try expectEqualHexStrings("\x48\x89\xD8", enc.code(), "mov rax, rbx");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .rbp, .disp = -4 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .rbp, .disp = -4 }) },
.{ .reg = .r11 },
});
try expectEqualHexStrings("\x4c\x89\x5d\xfc", enc.code(), "mov QWORD PTR [rbp - 4], r11");
try enc.encode(.mov, &.{
- .{ .mem = Memory.rip(.qword, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.qword, 0x10) },
.{ .reg = .r12 },
});
try expectEqualHexStrings("\x4C\x89\x25\x10\x00\x00\x00", enc.code(), "mov QWORD PTR [rip + 0x10], r12");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = .r11,
.scale_index = .{ .scale = 2, .index = .r12 },
.disp = 0x10,
@@ -1519,13 +1615,13 @@ test "lower MR encoding" {
try expectEqualHexStrings("\x4F\x89\x6C\x63\x10", enc.code(), "mov QWORD PTR [r11 + 2 * r12 + 0x10], r13");
try enc.encode(.mov, &.{
- .{ .mem = Memory.rip(.word, -0x10) },
+ .{ .mem = Instruction.Memory.rip(.word, -0x10) },
.{ .reg = .r12w },
});
try expectEqualHexStrings("\x66\x44\x89\x25\xF0\xFF\xFF\xFF", enc.code(), "mov WORD PTR [rip - 0x10], r12w");
try enc.encode(.mov, &.{
- .{ .mem = Memory.sib(.byte, .{
+ .{ .mem = Instruction.Memory.sib(.byte, .{
.base = .r11,
.scale_index = .{ .scale = 2, .index = .r12 },
.disp = 0x10,
@@ -1535,25 +1631,25 @@ test "lower MR encoding" {
try expectEqualHexStrings("\x47\x88\x6C\x63\x10", enc.code(), "mov BYTE PTR [r11 + 2 * r12 + 0x10], r13b");
try enc.encode(.add, &.{
- .{ .mem = Memory.sib(.byte, .{ .base = .ds, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .ds, .disp = 0x10000000 }) },
.{ .reg = .r12b },
});
try expectEqualHexStrings("\x44\x00\x24\x25\x00\x00\x00\x10", enc.code(), "add BYTE PTR ds:0x10000000, r12b");
try enc.encode(.add, &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .ds, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .ds, .disp = 0x10000000 }) },
.{ .reg = .r12d },
});
try expectEqualHexStrings("\x44\x01\x24\x25\x00\x00\x00\x10", enc.code(), "add DWORD PTR [ds:0x10000000], r12d");
try enc.encode(.add, &.{
- .{ .mem = Memory.sib(.dword, .{ .base = .gs, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.dword, .{ .base = .gs, .disp = 0x10000000 }) },
.{ .reg = .r12d },
});
try expectEqualHexStrings("\x65\x44\x01\x24\x25\x00\x00\x00\x10", enc.code(), "add DWORD PTR [gs:0x10000000], r12d");
try enc.encode(.sub, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .r11, .disp = 0x10000000 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .r11, .disp = 0x10000000 }) },
.{ .reg = .r12 },
});
try expectEqualHexStrings("\x4D\x29\xA3\x00\x00\x00\x10", enc.code(), "sub QWORD PTR [r11 + 0x10000000], r12");
@@ -1568,12 +1664,12 @@ test "lower M encoding" {
try expectEqualHexStrings("\x41\xFF\xD4", enc.code(), "call r12");
try enc.encode(.call, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .r12 }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .r12 }) },
});
try expectEqualHexStrings("\x41\xFF\x14\x24", enc.code(), "call QWORD PTR [r12]");
try enc.encode(.call, &.{
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = null,
.scale_index = .{ .index = .r11, .scale = 2 },
}) },
@@ -1581,7 +1677,7 @@ test "lower M encoding" {
try expectEqualHexStrings("\x42\xFF\x14\x5D\x00\x00\x00\x00", enc.code(), "call QWORD PTR [r11 * 2]");
try enc.encode(.call, &.{
- .{ .mem = Memory.sib(.qword, .{
+ .{ .mem = Instruction.Memory.sib(.qword, .{
.base = null,
.scale_index = .{ .index = .r12, .scale = 2 },
}) },
@@ -1589,7 +1685,7 @@ test "lower M encoding" {
try expectEqualHexStrings("\x42\xFF\x14\x65\x00\x00\x00\x00", enc.code(), "call QWORD PTR [r12 * 2]");
try enc.encode(.call, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .gs }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .gs }) },
});
try expectEqualHexStrings("\x65\xFF\x14\x25\x00\x00\x00\x00", enc.code(), "call gs:0x0");
@@ -1599,22 +1695,22 @@ test "lower M encoding" {
try expectEqualHexStrings("\xE8\x00\x00\x00\x00", enc.code(), "call 0x0");
try enc.encode(.push, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .rbp }) },
+ .{ .mem = Instruction.Memory.sib(.qword, .{ .base = .rbp }) },
});
try expectEqualHexStrings("\xFF\x75\x00", enc.code(), "push QWORD PTR [rbp]");
try enc.encode(.push, &.{
- .{ .mem = Memory.sib(.word, .{ .base = .rbp }) },
+ .{ .mem = Instruction.Memory.sib(.word, .{ .base = .rbp }) },
});
    try expectEqualHexStrings("\x66\xFF\x75\x00", enc.code(), "push WORD PTR [rbp]");
try enc.encode(.pop, &.{
- .{ .mem = Memory.rip(.qword, 0) },
+ .{ .mem = Instruction.Memory.rip(.qword, 0) },
});
try expectEqualHexStrings("\x8F\x05\x00\x00\x00\x00", enc.code(), "pop QWORD PTR [rip]");
try enc.encode(.pop, &.{
- .{ .mem = Memory.rip(.word, 0) },
+ .{ .mem = Instruction.Memory.rip(.word, 0) },
});
    try expectEqualHexStrings("\x66\x8F\x05\x00\x00\x00\x00", enc.code(), "pop WORD PTR [rip]");
@@ -1695,48 +1791,48 @@ test "lower FD/TD encoding" {
try enc.encode(.mov, &.{
.{ .reg = .rax },
- .{ .mem = Memory.moffs(.cs, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.cs, 0x10) },
});
try expectEqualHexStrings("\x2E\x48\xA1\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs rax, cs:0x10");
try enc.encode(.mov, &.{
.{ .reg = .eax },
- .{ .mem = Memory.moffs(.fs, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.fs, 0x10) },
});
try expectEqualHexStrings("\x64\xA1\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs eax, fs:0x10");
try enc.encode(.mov, &.{
.{ .reg = .ax },
- .{ .mem = Memory.moffs(.gs, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.gs, 0x10) },
});
try expectEqualHexStrings("\x65\x66\xA1\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs ax, gs:0x10");
try enc.encode(.mov, &.{
.{ .reg = .al },
- .{ .mem = Memory.moffs(.ds, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.ds, 0x10) },
});
try expectEqualHexStrings("\xA0\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs al, ds:0x10");
try enc.encode(.mov, &.{
- .{ .mem = Memory.moffs(.cs, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.cs, 0x10) },
.{ .reg = .rax },
});
try expectEqualHexStrings("\x2E\x48\xA3\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs cs:0x10, rax");
try enc.encode(.mov, &.{
- .{ .mem = Memory.moffs(.fs, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.fs, 0x10) },
.{ .reg = .eax },
});
try expectEqualHexStrings("\x64\xA3\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs fs:0x10, eax");
try enc.encode(.mov, &.{
- .{ .mem = Memory.moffs(.gs, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.gs, 0x10) },
.{ .reg = .ax },
});
try expectEqualHexStrings("\x65\x66\xA3\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs gs:0x10, ax");
try enc.encode(.mov, &.{
- .{ .mem = Memory.moffs(.ds, 0x10) },
+ .{ .mem = Instruction.Memory.moffs(.ds, 0x10) },
.{ .reg = .al },
});
try expectEqualHexStrings("\xA2\x10\x00\x00\x00\x00\x00\x00\x00", enc.code(), "movabs ds:0x10, al");
@@ -1774,16 +1870,16 @@ test "invalid instruction" {
.{ .reg = .al },
});
try invalidInstruction(.call, &.{
- .{ .mem = Memory.rip(.dword, 0) },
+ .{ .mem = Instruction.Memory.rip(.dword, 0) },
});
try invalidInstruction(.call, &.{
- .{ .mem = Memory.rip(.word, 0) },
+ .{ .mem = Instruction.Memory.rip(.word, 0) },
});
try invalidInstruction(.call, &.{
- .{ .mem = Memory.rip(.byte, 0) },
+ .{ .mem = Instruction.Memory.rip(.byte, 0) },
});
try invalidInstruction(.mov, &.{
- .{ .mem = Memory.rip(.word, 0x10) },
+ .{ .mem = Instruction.Memory.rip(.word, 0x10) },
.{ .reg = .r12 },
});
try invalidInstruction(.lea, &.{
@@ -1792,7 +1888,7 @@ test "invalid instruction" {
});
try invalidInstruction(.lea, &.{
.{ .reg = .al },
- .{ .mem = Memory.rip(.byte, 0) },
+ .{ .mem = Instruction.Memory.rip(.byte, 0) },
});
try invalidInstruction(.pop, &.{
.{ .reg = .r12b },
@@ -1817,7 +1913,7 @@ fn cannotEncode(mnemonic: Instruction.Mnemonic, ops: []const Instruction.Operand
test "cannot encode" {
try cannotEncode(.@"test", &.{
- .{ .mem = Memory.sib(.byte, .{ .base = .r12 }) },
+ .{ .mem = Instruction.Memory.sib(.byte, .{ .base = .r12 }) },
.{ .reg = .ah },
});
try cannotEncode(.@"test", &.{
@@ -2149,8 +2245,8 @@ const Assembler = struct {
return null;
}
- fn parseMemory(as: *Assembler) ParseError!Memory {
- const ptr_size: ?Memory.PtrSize = blk: {
+ fn parseMemory(as: *Assembler) ParseError!Instruction.Memory {
+ const ptr_size: ?Instruction.Memory.PtrSize = blk: {
const pos = as.it.pos;
const ptr_size = as.parsePtrSize() catch |err| switch (err) {
error.UnexpectedToken => {
@@ -2194,7 +2290,7 @@ const Assembler = struct {
if (res.rip) {
if (res.base != null or res.scale_index != null or res.offset != null)
return error.InvalidMemoryOperand;
- return Memory.rip(ptr_size orelse .qword, res.disp orelse 0);
+ return Instruction.Memory.rip(ptr_size orelse .qword, res.disp orelse 0);
}
if (res.base) |base| {
if (res.rip)
@@ -2202,9 +2298,9 @@ const Assembler = struct {
if (res.offset) |offset| {
if (res.scale_index != null or res.disp != null)
return error.InvalidMemoryOperand;
- return Memory.moffs(base, offset);
+ return Instruction.Memory.moffs(base, offset);
}
- return Memory.sib(ptr_size orelse .qword, .{
+ return Instruction.Memory.sib(ptr_size orelse .qword, .{
.base = base,
.scale_index = res.scale_index,
.disp = res.disp orelse 0,
@@ -2222,12 +2318,12 @@ const Assembler = struct {
const MemoryParseResult = struct {
rip: bool = false,
base: ?Register = null,
- scale_index: ?Memory.ScaleIndex = null,
+ scale_index: ?Instruction.Memory.ScaleIndex = null,
disp: ?i32 = null,
offset: ?u64 = null,
};
- fn parseMemoryRule(as: *Assembler, rule: anytype) ParseError!MemoryParseResult {
+    fn parseMemoryRule(as: *Assembler, rule: anytype) ParseError!MemoryParseResult {
var res: MemoryParseResult = .{};
inline for (rule, 0..) |cond, i| {
if (@typeInfo(@TypeOf(cond)) != .EnumLiteral) {
@@ -2294,7 +2390,7 @@ const Assembler = struct {
return res;
}
- fn parsePtrSize(as: *Assembler) ParseError!Memory.PtrSize {
+ fn parsePtrSize(as: *Assembler) ParseError!Instruction.Memory.PtrSize {
const size = try as.expect(.string);
try as.skip(1, .{.space});
const ptr = try as.expect(.string);
src/arch/x86_64/Encoding.zig
@@ -803,7 +803,10 @@ fn estimateInstructionLength(prefix: Prefix, encoding: Encoding, ops: []const Op
@memcpy(inst.ops[0..ops.len], ops);
var cwriter = std.io.countingWriter(std.io.null_writer);
- inst.encode(cwriter.writer(), .{ .allow_frame_loc = true }) catch unreachable; // Not allowed to fail here unless OOM.
+ inst.encode(cwriter.writer(), .{
+ .allow_frame_locs = true,
+ .allow_symbols = true,
+ }) catch unreachable; // Not allowed to fail here unless OOM.
return @as(usize, @intCast(cwriter.bytes_written));
}
src/arch/x86_64/Lower.zig
@@ -50,12 +50,12 @@ pub const Reloc = struct {
const Target = union(enum) {
inst: Mir.Inst.Index,
- linker_reloc: Mir.Reloc,
- linker_extern_fn: Mir.Reloc,
- linker_got: Mir.Reloc,
- linker_direct: Mir.Reloc,
- linker_import: Mir.Reloc,
- linker_tlv: Mir.Reloc,
+ linker_reloc: bits.Symbol,
+ linker_extern_fn: bits.Symbol,
+ linker_got: bits.Symbol,
+ linker_direct: bits.Symbol,
+ linker_import: bits.Symbol,
+ linker_tlv: bits.Symbol,
};
};
@@ -99,17 +99,15 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.{ .reg = inst.data.rr.r2 },
});
},
- .pseudo_cmov_nz_or_p_rm_sib,
- .pseudo_cmov_nz_or_p_rm_rip,
- => {
+ .pseudo_cmov_nz_or_p_rm => {
assert(inst.data.rx.fixes == ._);
try lower.emit(.none, .cmovnz, &.{
.{ .reg = inst.data.rx.r1 },
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
});
try lower.emit(.none, .cmovp, &.{
.{ .reg = inst.data.rx.r1 },
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
});
},
.pseudo_set_z_and_np_r => {
@@ -125,18 +123,16 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.{ .reg = inst.data.rr.r2 },
});
},
- .pseudo_set_z_and_np_m_sib,
- .pseudo_set_z_and_np_m_rip,
- => {
+ .pseudo_set_z_and_np_m => {
assert(inst.data.rx.fixes == ._);
try lower.emit(.none, .setz, &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
});
try lower.emit(.none, .setnp, &.{
.{ .reg = inst.data.rx.r1 },
});
try lower.emit(.none, .@"and", &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
.{ .reg = inst.data.rx.r1 },
});
},
@@ -153,18 +149,16 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.{ .reg = inst.data.rr.r2 },
});
},
- .pseudo_set_nz_or_p_m_sib,
- .pseudo_set_nz_or_p_m_rip,
- => {
+ .pseudo_set_nz_or_p_m => {
assert(inst.data.rx.fixes == ._);
try lower.emit(.none, .setnz, &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
});
try lower.emit(.none, .setp, &.{
.{ .reg = inst.data.rx.r1 },
});
try lower.emit(.none, .@"or", &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
.{ .reg = inst.data.rx.r1 },
});
},
@@ -289,28 +283,20 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
.rri_s,
.ri_s,
.i_s,
- .mi_sib_s,
- .mi_rip_s,
- .rmi_sib_s,
- .rmi_rip_s,
+ .mi_s,
+ .rmi_s,
=> Immediate.s(@bitCast(i)),
.rrri,
.rri_u,
.ri_u,
.i_u,
- .mi_sib_u,
- .mi_rip_u,
- .rmi_sib,
- .rmi_rip,
- .rmi_sib_u,
- .rmi_rip_u,
- .mri_sib,
- .mri_rip,
- .rrm_sib,
- .rrm_rip,
- .rrmi_sib,
- .rrmi_rip,
+ .mi_u,
+ .rmi,
+ .rmi_u,
+ .mri,
+ .rrm,
+ .rrmi,
=> Immediate.u(i),
.ri64 => Immediate.u(lower.mir.extraData(Mir.Imm64, i).data.decode()),
@@ -319,50 +305,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
};
}
-fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
- return lower.mir.resolveFrameLoc(switch (ops) {
- .rm_sib,
- .rmi_sib,
- .rmi_sib_s,
- .rmi_sib_u,
- .m_sib,
- .mi_sib_u,
- .mi_sib_s,
- .mr_sib,
- .mrr_sib,
- .mri_sib,
- .rrm_sib,
- .rrmi_sib,
-
- .pseudo_cmov_nz_or_p_rm_sib,
- .pseudo_set_z_and_np_m_sib,
- .pseudo_set_nz_or_p_m_sib,
- => lower.mir.extraData(Mir.MemorySib, payload).data.decode(),
-
- .rm_rip,
- .rmi_rip,
- .rmi_rip_s,
- .rmi_rip_u,
- .m_rip,
- .mi_rip_u,
- .mi_rip_s,
- .mr_rip,
- .mrr_rip,
- .mri_rip,
- .rrm_rip,
- .rrmi_rip,
-
- .pseudo_cmov_nz_or_p_rm_rip,
- .pseudo_set_z_and_np_m_rip,
- .pseudo_set_nz_or_p_m_rip,
- => lower.mir.extraData(Mir.MemoryRip, payload).data.decode(),
-
- .rax_moffs,
- .moffs_rax,
- => lower.mir.extraData(Mir.MemoryMoffs, payload).data.decode(),
-
- else => unreachable,
- });
+fn mem(lower: Lower, payload: u32) Memory {
+ return lower.mir.resolveFrameLoc(lower.mir.extraData(Mir.Memory, payload).data).decode();
}
fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
@@ -375,7 +319,42 @@ fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
}
fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) Error!void {
- lower.result_insts[lower.result_insts_len] = try Instruction.new(prefix, mnemonic, ops);
+ var emit_prefix = prefix;
+ var emit_mnemonic = mnemonic;
+ var emit_ops_storage: [4]Operand = undefined;
+ const emit_ops = emit_ops_storage[0..ops.len];
+ for (emit_ops, ops) |*emit_op, op| {
+ emit_op.* = switch (op) {
+ else => op,
+ .mem => |mem_op| switch (mem_op.base()) {
+ else => op,
+ .reloc => |sym| op: {
+ assert(prefix == .none);
+ assert(mem_op.sib.disp == 0);
+ assert(mem_op.sib.scale_index.scale == 0);
+ _ = lower.reloc(.{ .linker_reloc = sym });
+ break :op if (lower.bin_file.options.pic) switch (mnemonic) {
+ .mov, .lea => .{ .mem = Memory.rip(mem_op.sib.ptr_size, 0) },
+ else => unreachable,
+ } else switch (mnemonic) {
+ .call => .{ .mem = Memory.sib(mem_op.sib.ptr_size, .{
+ .base = .{ .reg = .ds },
+ }) },
+ .lea => {
+ emit_mnemonic = .mov;
+ break :op .{ .imm = Immediate.s(0) };
+ },
+ .mov => .{ .mem = Memory.sib(mem_op.sib.ptr_size, .{
+ .base = .{ .reg = .ds },
+ }) },
+ else => unreachable,
+ };
+ },
+ },
+ };
+ }
+ lower.result_insts[lower.result_insts_len] =
+ try Instruction.new(emit_prefix, emit_mnemonic, emit_ops);
lower.result_insts_len += 1;
}
@@ -391,74 +370,13 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.rrri => inst.data.rrri.fixes,
.rri_s, .rri_u => inst.data.rri.fixes,
.ri_s, .ri_u => inst.data.ri.fixes,
- .ri64,
- .rm_sib,
- .rm_rip,
- .rmi_sib_s,
- .rmi_sib_u,
- .rmi_rip_s,
- .rmi_rip_u,
- .mr_sib,
- .mr_rip,
- => inst.data.rx.fixes,
- .mrr_sib, .mrr_rip, .rrm_sib, .rrm_rip => inst.data.rrx.fixes,
- .rmi_sib, .rmi_rip, .mri_sib, .mri_rip => inst.data.rix.fixes,
- .rrmi_sib, .rrmi_rip => inst.data.rrix.fixes,
- .mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => inst.data.x.fixes,
- .m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes,
- .extern_fn_reloc,
- .got_reloc,
- .direct_reloc,
- .import_reloc,
- .tlv_reloc,
- => ._,
- .linker_reloc => {
- if (lower.bin_file.options.pic) {
- assert(inst.data.rx.fixes == ._);
- const reg = inst.data.rx.r1;
- const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
- _ = lower.reloc(.{ .linker_reloc = extra });
- const mnemonic: Mnemonic = switch (inst.tag) {
- .mov => .mov,
- .lea => .lea,
- else => unreachable,
- };
- try lower.emit(.none, mnemonic, &.{
- .{ .reg = reg },
- .{ .mem = Memory.rip(Memory.PtrSize.fromBitSize(reg.bitSize()), 0) },
- });
- } else {
- switch (inst.tag) {
- .call => {
- _ = lower.reloc(.{ .linker_reloc = inst.data.reloc });
- try lower.emit(.none, .call, &.{
- .{ .mem = Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = 0 }) },
- });
- },
- .lea => {
- assert(inst.data.rx.fixes == ._);
- const reg = inst.data.rx.r1;
- const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
- try lower.emit(.none, .mov, &.{
- .{ .reg = reg },
- .{ .imm = lower.reloc(.{ .linker_reloc = extra }) },
- });
- },
- .mov => {
- assert(inst.data.rx.fixes == ._);
- const reg = inst.data.rx.r1;
- const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
- _ = lower.reloc(.{ .linker_reloc = extra });
- try lower.emit(.none, .mov, &.{
- .{ .reg = reg },
- .{ .mem = Memory.sib(.qword, .{ .base = .{ .reg = .ds }, .disp = 0 }) },
- });
- },
- else => return lower.fail("TODO lower {s} {s}", .{ @tagName(inst.tag), @tagName(inst.ops) }),
- }
- }
- return;
- },
+ .ri64, .rm, .rmi_s, .mr => inst.data.rx.fixes,
+ .mrr, .rrm => inst.data.rrx.fixes,
+ .rmi, .mri => inst.data.rix.fixes,
+ .rrmi => inst.data.rrix.fixes,
+ .mi_u, .mi_s => inst.data.x.fixes,
+ .m => inst.data.x.fixes,
+ .extern_fn_reloc, .got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ._,
else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}),
};
try lower.emit(switch (fixes) {
@@ -527,73 +445,64 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
.{ .reg = inst.data.rri.r2 },
.{ .imm = lower.imm(inst.ops, inst.data.rri.i) },
},
- .m_sib, .m_rip => &.{
- .{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
+ .m => &.{
+ .{ .mem = lower.mem(inst.data.x.payload) },
},
- .mi_sib_s, .mi_sib_u, .mi_rip_s, .mi_rip_u => &.{
- .{ .mem = lower.mem(inst.ops, inst.data.x.payload + 1) },
+ .mi_s, .mi_u => &.{
+ .{ .mem = lower.mem(inst.data.x.payload + 1) },
.{ .imm = lower.imm(
inst.ops,
lower.mir.extraData(Mir.Imm32, inst.data.x.payload).data.imm,
) },
},
- .rm_sib, .rm_rip => &.{
+ .rm => &.{
.{ .reg = inst.data.rx.r1 },
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .{ .mem = lower.mem(inst.data.rx.payload) },
},
- .rmi_sib, .rmi_rip => &.{
+ .rmi => &.{
.{ .reg = inst.data.rix.r1 },
- .{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
+ .{ .mem = lower.mem(inst.data.rix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
- .rmi_sib_s, .rmi_sib_u, .rmi_rip_s, .rmi_rip_u => &.{
+ .rmi_s, .rmi_u => &.{
.{ .reg = inst.data.rx.r1 },
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload + 1) },
+ .{ .mem = lower.mem(inst.data.rx.payload + 1) },
.{ .imm = lower.imm(
inst.ops,
lower.mir.extraData(Mir.Imm32, inst.data.rx.payload).data.imm,
) },
},
- .mr_sib, .mr_rip => &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
+ .mr => &.{
+ .{ .mem = lower.mem(inst.data.rx.payload) },
.{ .reg = inst.data.rx.r1 },
},
- .mrr_sib, .mrr_rip => &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rrx.payload) },
+ .mrr => &.{
+ .{ .mem = lower.mem(inst.data.rrx.payload) },
.{ .reg = inst.data.rrx.r1 },
.{ .reg = inst.data.rrx.r2 },
},
- .mri_sib, .mri_rip => &.{
- .{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
+ .mri => &.{
+ .{ .mem = lower.mem(inst.data.rix.payload) },
.{ .reg = inst.data.rix.r1 },
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
},
- .rrm_sib, .rrm_rip => &.{
+ .rrm => &.{
.{ .reg = inst.data.rrx.r1 },
.{ .reg = inst.data.rrx.r2 },
- .{ .mem = lower.mem(inst.ops, inst.data.rrx.payload) },
+ .{ .mem = lower.mem(inst.data.rrx.payload) },
},
- .rrmi_sib, .rrmi_rip => &.{
+ .rrmi => &.{
.{ .reg = inst.data.rrix.r1 },
.{ .reg = inst.data.rrix.r2 },
- .{ .mem = lower.mem(inst.ops, inst.data.rrix.payload) },
+ .{ .mem = lower.mem(inst.data.rrix.payload) },
.{ .imm = lower.imm(inst.ops, inst.data.rrix.i) },
},
- .rax_moffs => &.{
- .{ .reg = .rax },
- .{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
- },
- .moffs_rax => &.{
- .{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
- .{ .reg = .rax },
- },
.extern_fn_reloc => &.{
.{ .imm = lower.reloc(.{ .linker_extern_fn = inst.data.reloc }) },
},
- .linker_reloc => unreachable,
.got_reloc, .direct_reloc, .import_reloc, .tlv_reloc => ops: {
const reg = inst.data.rx.r1;
- const extra = lower.mir.extraData(Mir.Reloc, inst.data.rx.payload).data;
+ const extra = lower.mir.extraData(bits.Symbol, inst.data.rx.payload).data;
_ = lower.reloc(switch (inst.ops) {
.got_reloc => .{ .linker_got = extra },
.direct_reloc => .{ .linker_direct = extra },
@@ -635,7 +544,7 @@ const ErrorMsg = Module.ErrorMsg;
const Immediate = bits.Immediate;
const Instruction = encoder.Instruction;
const Lower = @This();
-const Memory = bits.Memory;
+const Memory = Instruction.Memory;
const Mir = @import("Mir.zig");
const Mnemonic = Instruction.Mnemonic;
const Module = @import("../../Module.zig");
src/arch/x86_64/Mir.zig
@@ -17,7 +17,6 @@ const encoder = @import("encoder.zig");
const Air = @import("../../Air.zig");
const CodeGen = @import("CodeGen.zig");
const IntegerBitSet = std.bit_set.IntegerBitSet;
-const Memory = bits.Memory;
const Register = bits.Register;
instructions: std.MultiArrayList(Inst).Slice,
@@ -767,84 +766,42 @@ pub const Inst = struct {
/// Relative displacement operand.
/// Uses `imm` payload.
rel,
- /// Register, memory (SIB) operands.
+ /// Register, memory operands.
/// Uses `rx` payload.
- rm_sib,
- /// Register, memory (RIP) operands.
- /// Uses `rx` payload.
- rm_rip,
- /// Register, memory (SIB), immediate (word) operands.
- /// Uses `rix` payload with extra data of type `MemorySib`.
- rmi_sib,
- /// Register, memory (RIP), immediate (word) operands.
- /// Uses `rix` payload with extra data of type `MemoryRip`.
- rmi_rip,
- /// Register, memory (SIB), immediate (signed) operands.
- /// Uses `rx` payload with extra data of type `Imm32` followed by `MemorySib`.
- rmi_sib_s,
- /// Register, memory (SIB), immediate (unsigned) operands.
- /// Uses `rx` payload with extra data of type `Imm32` followed by `MemorySib`.
- rmi_sib_u,
- /// Register, memory (RIP), immediate (signed) operands.
- /// Uses `rx` payload with extra data of type `Imm32` followed by `MemoryRip`.
- rmi_rip_s,
- /// Register, memory (RIP), immediate (unsigned) operands.
- /// Uses `rx` payload with extra data of type `Imm32` followed by `MemoryRip`.
- rmi_rip_u,
- /// Register, register, memory (RIP).
- /// Uses `rrix` payload with extra data of type `MemoryRip`.
- rrm_rip,
- /// Register, register, memory (SIB).
- /// Uses `rrix` payload with extra data of type `MemorySib`.
- rrm_sib,
- /// Register, register, memory (RIP), immediate (byte) operands.
- /// Uses `rrix` payload with extra data of type `MemoryRip`.
- rrmi_rip,
- /// Register, register, memory (SIB), immediate (byte) operands.
- /// Uses `rrix` payload with extra data of type `MemorySib`.
- rrmi_sib,
- /// Single memory (SIB) operand.
- /// Uses `x` with extra data of type `MemorySib`.
- m_sib,
- /// Single memory (RIP) operand.
- /// Uses `x` with extra data of type `MemoryRip`.
- m_rip,
- /// Memory (SIB), immediate (sign-extend) operands.
- /// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
- mi_sib_s,
- /// Memory (SIB), immediate (unsigned) operands.
- /// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
- mi_sib_u,
- /// Memory (RIP), immediate (sign-extend) operands.
- /// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
- mi_rip_s,
- /// Memory (RIP), immediate (unsigned) operands.
- /// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
- mi_rip_u,
- /// Memory (SIB), register operands.
- /// Uses `rx` payload with extra data of type `MemorySib`.
- mr_sib,
- /// Memory (RIP), register operands.
- /// Uses `rx` payload with extra data of type `MemoryRip`.
- mr_rip,
- /// Memory (SIB), register, register operands.
- /// Uses `rrx` payload with extra data of type `MemorySib`.
- mrr_sib,
- /// Memory (RIP), register, register operands.
- /// Uses `rrx` payload with extra data of type `MemoryRip`.
- mrr_rip,
- /// Memory (SIB), register, immediate (word) operands.
- /// Uses `rix` payload with extra data of type `MemorySib`.
- mri_sib,
- /// Memory (RIP), register, immediate (word) operands.
- /// Uses `rix` payload with extra data of type `MemoryRip`.
- mri_rip,
- /// Rax, Memory moffs.
- /// Uses `x` with extra data of type `MemoryMoffs`.
- rax_moffs,
- /// Memory moffs, rax.
- /// Uses `x` with extra data of type `MemoryMoffs`.
- moffs_rax,
+ rm,
+ /// Register, memory, immediate (word) operands.
+ /// Uses `rix` payload with extra data of type `Memory`.
+ rmi,
+ /// Register, memory, immediate (signed) operands.
+ /// Uses `rx` payload with extra data of type `Imm32` followed by `Memory`.
+ rmi_s,
+ /// Register, memory, immediate (unsigned) operands.
+ /// Uses `rx` payload with extra data of type `Imm32` followed by `Memory`.
+ rmi_u,
+ /// Register, register, memory.
+ /// Uses `rrix` payload with extra data of type `Memory`.
+ rrm,
+ /// Register, register, memory, immediate (byte) operands.
+ /// Uses `rrix` payload with extra data of type `Memory`.
+ rrmi,
+ /// Single memory operand.
+ /// Uses `x` with extra data of type `Memory`.
+ m,
+ /// Memory, immediate (sign-extend) operands.
+ /// Uses `x` payload with extra data of type `Imm32` followed by `Memory`.
+ mi_s,
+ /// Memory, immediate (unsigned) operands.
+ /// Uses `x` payload with extra data of type `Imm32` followed by `Memory`.
+ mi_u,
+ /// Memory, register operands.
+ /// Uses `rx` payload with extra data of type `Memory`.
+ mr,
+ /// Memory, register, register operands.
+ /// Uses `rrx` payload with extra data of type `Memory`.
+ mrr,
+ /// Memory, register, immediate (word) operands.
+ /// Uses `rix` payload with extra data of type `Memory`.
+ mri,
/// References another Mir instruction directly.
/// Uses `inst` payload.
inst,
@@ -852,20 +809,17 @@ pub const Inst = struct {
/// Uses `reloc` payload.
extern_fn_reloc,
/// Linker relocation - GOT indirection.
- /// Uses `rx` payload with extra data of type `Reloc`.
+ /// Uses `rx` payload with extra data of type `bits.Symbol`.
got_reloc,
/// Linker relocation - direct reference.
- /// Uses `rx` payload with extra data of type `Reloc`.
+ /// Uses `rx` payload with extra data of type `bits.Symbol`.
direct_reloc,
/// Linker relocation - imports table indirection (binding).
- /// Uses `rx` payload with extra data of type `Reloc`.
+ /// Uses `rx` payload with extra data of type `bits.Symbol`.
import_reloc,
/// Linker relocation - threadlocal variable via GOT indirection.
- /// Uses `rx` payload with extra data of type `Reloc`.
+ /// Uses `rx` payload with extra data of type `bits.Symbol`.
tlv_reloc,
- /// Linker relocation.
- /// Uses `rx` payload with extra data of type `Reloc`.
- linker_reloc,
// Pseudo instructions:
@@ -878,10 +832,7 @@ pub const Inst = struct {
pseudo_cmov_nz_or_p_rr,
/// Conditional move if zero flag not set or parity flag set
/// Uses `rx` payload.
- pseudo_cmov_nz_or_p_rm_sib,
- /// Conditional move if zero flag not set or parity flag set
- /// Uses `rx` payload.
- pseudo_cmov_nz_or_p_rm_rip,
+ pseudo_cmov_nz_or_p_rm,
/// Set byte if zero flag set and parity flag not set
/// Requires a scratch register!
/// Uses `rr` payload.
@@ -889,11 +840,7 @@ pub const Inst = struct {
/// Set byte if zero flag set and parity flag not set
/// Requires a scratch register!
/// Uses `rx` payload.
- pseudo_set_z_and_np_m_sib,
- /// Set byte if zero flag set and parity flag not set
- /// Requires a scratch register!
- /// Uses `rx` payload.
- pseudo_set_z_and_np_m_rip,
+ pseudo_set_z_and_np_m,
/// Set byte if zero flag not set or parity flag set
/// Requires a scratch register!
/// Uses `rr` payload.
@@ -901,11 +848,7 @@ pub const Inst = struct {
/// Set byte if zero flag not set or parity flag set
/// Requires a scratch register!
/// Uses `rx` payload.
- pseudo_set_nz_or_p_m_sib,
- /// Set byte if zero flag not set or parity flag set
- /// Requires a scratch register!
- /// Uses `rx` payload.
- pseudo_set_nz_or_p_m_rip,
+ pseudo_set_nz_or_p_m,
/// Jump if zero flag set and parity flag not set
/// Uses `inst` payload.
pseudo_j_z_and_np_inst,
@@ -1036,7 +979,7 @@ pub const Inst = struct {
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
- reloc: Reloc,
+ reloc: bits.Symbol,
/// Debug line and column position
line_column: struct {
line: u32,
@@ -1055,14 +998,6 @@ pub const Inst = struct {
}
};
-/// A linker symbol not yet allocated in VM.
-pub const Reloc = struct {
- /// Index of the containing atom.
- atom_index: u32,
- /// Index into the linker's symbol table.
- sym_index: u32,
-};
-
/// Used in conjunction with payload to transfer a list of used registers in a compact manner.
pub const RegisterList = struct {
bitset: BitSet = BitSet.initEmpty(),
@@ -1123,100 +1058,94 @@ pub const Imm64 = struct {
}
};
-// TODO this can be further compacted using packed struct
-pub const MemorySib = struct {
- /// Size of the pointer.
- ptr_size: u32,
- /// Base register tag of type Memory.Base.Tag
- base_tag: u32,
- /// Base register of type Register or FrameIndex
+pub const Memory = struct {
+ info: Info,
base: u32,
- /// Scale starting at bit 0 and index register starting at bit 4.
- scale_index: u32,
- /// Displacement value.
- disp: i32,
+ off: u32,
+ extra: u32,
+
+ pub const Info = packed struct(u32) {
+ base: @typeInfo(bits.Memory.Base).Union.tag_type.?,
+ mod: @typeInfo(bits.Memory.Mod).Union.tag_type.?,
+ size: bits.Memory.Size,
+ index: Register,
+ scale: bits.Memory.Scale,
+ _: u16 = undefined,
+ };
- pub fn encode(mem: Memory) MemorySib {
- const sib = mem.sib;
- assert(sib.scale_index.scale == 0 or std.math.isPowerOfTwo(sib.scale_index.scale));
+ pub fn encode(mem: bits.Memory) Memory {
+ assert(mem.base != .reloc or mem.mod != .off);
return .{
- .ptr_size = @intFromEnum(sib.ptr_size),
- .base_tag = @intFromEnum(sib.base),
- .base = switch (sib.base) {
+ .info = .{
+ .base = mem.base,
+ .mod = mem.mod,
+ .size = switch (mem.mod) {
+ .rm => |rm| rm.size,
+ .off => undefined,
+ },
+ .index = switch (mem.mod) {
+ .rm => |rm| rm.index,
+ .off => undefined,
+ },
+ .scale = switch (mem.mod) {
+ .rm => |rm| rm.scale,
+ .off => undefined,
+ },
+ },
+ .base = switch (mem.base) {
.none => undefined,
- .reg => |r| @intFromEnum(r),
- .frame => |fi| @intFromEnum(fi),
+ .reg => |reg| @intFromEnum(reg),
+ .frame => |frame_index| @intFromEnum(frame_index),
+ .reloc => |symbol| symbol.sym_index,
+ },
+ .off = switch (mem.mod) {
+ .rm => |rm| @bitCast(rm.disp),
+ .off => |off| @truncate(off),
},
- .scale_index = @as(u32, sib.scale_index.scale) << 0 |
- @as(u32, if (sib.scale_index.scale > 0)
- @intFromEnum(sib.scale_index.index)
+ .extra = if (mem.base == .reloc)
+ mem.base.reloc.atom_index
+ else if (mem.mod == .off)
+ @intCast(mem.mod.off >> 32)
else
- undefined) << 4,
- .disp = sib.disp,
+ undefined,
};
}
- pub fn decode(msib: MemorySib) Memory {
- const scale: u4 = @truncate(msib.scale_index);
- assert(scale == 0 or std.math.isPowerOfTwo(scale));
- return .{ .sib = .{
- .ptr_size = @enumFromInt(msib.ptr_size),
- .base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) {
- .none => .none,
- .reg => .{ .reg = @enumFromInt(msib.base) },
- .frame => .{ .frame = @enumFromInt(msib.base) },
+ pub fn decode(mem: Memory) encoder.Instruction.Memory {
+ switch (mem.info.mod) {
+ .rm => {
+ if (mem.info.base == .reg and @as(Register, @enumFromInt(mem.base)) == .rip) {
+ assert(mem.info.index == .none and mem.info.scale == .@"1");
+ return encoder.Instruction.Memory.rip(mem.info.size, @bitCast(mem.off));
+ }
+ return encoder.Instruction.Memory.sib(mem.info.size, .{
+ .disp = @bitCast(mem.off),
+ .base = switch (mem.info.base) {
+ .none => .none,
+ .reg => .{ .reg = @enumFromInt(mem.base) },
+ .frame => .{ .frame = @enumFromInt(mem.base) },
+ .reloc => .{ .reloc = .{ .atom_index = mem.extra, .sym_index = mem.base } },
+ },
+ .scale_index = switch (mem.info.index) {
+ .none => null,
+ else => |index| .{ .scale = switch (mem.info.scale) {
+ inline else => |scale| comptime std.fmt.parseInt(
+ u4,
+ @tagName(scale),
+ 10,
+ ) catch unreachable,
+ }, .index = index },
+ },
+ });
},
- .scale_index = .{
- .scale = scale,
- .index = if (scale > 0) @enumFromInt(msib.scale_index >> 4) else undefined,
+ .off => {
+ assert(mem.info.base == .reg);
+ return encoder.Instruction.Memory.moffs(
+ @enumFromInt(mem.base),
+ @as(u64, mem.extra) << 32 | mem.off,
+ );
},
- .disp = msib.disp,
- } };
- }
-};
-
-pub const MemoryRip = struct {
- /// Size of the pointer.
- ptr_size: u32,
- /// Displacement value.
- disp: i32,
-
- pub fn encode(mem: Memory) MemoryRip {
- return .{
- .ptr_size = @intFromEnum(mem.rip.ptr_size),
- .disp = mem.rip.disp,
- };
- }
-
- pub fn decode(mrip: MemoryRip) Memory {
- return .{ .rip = .{
- .ptr_size = @enumFromInt(mrip.ptr_size),
- .disp = mrip.disp,
- } };
- }
-};
-
-pub const MemoryMoffs = struct {
- /// Segment register.
- seg: u32,
- /// Absolute offset wrt to the segment register split between MSB and LSB parts much like
- /// `Imm64` payload.
- msb: u32,
- lsb: u32,
-
- pub fn encode(seg: Register, offset: u64) MemoryMoffs {
- return .{
- .seg = @intFromEnum(seg),
- .msb = @truncate(offset >> 32),
- .lsb = @truncate(offset >> 0),
- };
- }
-
- pub fn decode(moffs: MemoryMoffs) Memory {
- return .{ .moffs = .{
- .seg = @enumFromInt(moffs.seg),
- .offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0,
- } };
+ }
}
};
@@ -1234,8 +1163,8 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
- i32 => @bitCast(mir.extra[i]),
- else => @compileError("bad field type"),
+ i32, Memory.Info => @bitCast(mir.extra[i]),
+ else => @compileError("bad field type: " ++ field.name ++ ": " ++ @typeName(field.type)),
};
i += 1;
}
@@ -1251,15 +1180,19 @@ pub const FrameLoc = struct {
};
pub fn resolveFrameLoc(mir: Mir, mem: Memory) Memory {
- return switch (mem) {
- .sib => |sib| switch (sib.base) {
- .none, .reg => mem,
- .frame => |index| if (mir.frame_locs.len > 0) Memory.sib(sib.ptr_size, .{
- .base = .{ .reg = mir.frame_locs.items(.base)[@intFromEnum(index)] },
- .disp = mir.frame_locs.items(.disp)[@intFromEnum(index)] + sib.disp,
- .scale_index = mem.scaleIndex(),
- }) else mem,
- },
- .rip, .moffs => mem,
+ return switch (mem.info.base) {
+ .none, .reg, .reloc => mem,
+ .frame => if (mir.frame_locs.len > 0) Memory{
+ .info = .{
+ .base = .reg,
+ .mod = mem.info.mod,
+ .size = mem.info.size,
+ .index = mem.info.index,
+ .scale = mem.info.scale,
+ },
+ .base = @intFromEnum(mir.frame_locs.items(.base)[mem.base]),
+ .off = @bitCast(mir.frame_locs.items(.disp)[mem.base] + @as(i32, @bitCast(mem.off))),
+ .extra = mem.extra,
+ } else mem,
};
}