Commit f289b82d0e
Changed files (17)
lib/std/os/linux/x86_64.zig
@@ -114,14 +114,15 @@ pub fn clone() callconv(.Naked) usize {
\\ movq %%rcx,(%%rsi)
\\ syscall
\\ testq %%rax,%%rax
- \\ jnz 1f
+ \\ jz 1f
+ \\ retq
+ \\1: .cfi_undefined %%rip
\\ xorl %%ebp,%%ebp
\\ popq %%rdi
\\ callq *%%r9
\\ movl %%eax,%%edi
\\ movl $60,%%eax // SYS_exit
\\ syscall
- \\1: ret
\\
);
}
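The fix inverts the branch: the parent (rax != 0 after the syscall) now falls through to an immediate retq, while the child (rax == 0) jumps to label 1, where .cfi_undefined %rip marks the frame as having no return address, so unwinders stop here instead of walking garbage above the new thread's entry frame. The same marker is added to _start() in lib/std/start.zig below; both are inline assembly compiled by the self-hosted backend, which is why airAsm in src/arch/x86_64/CodeGen.zig learns to parse these directives. A minimal standalone sketch of the root-frame pattern, assuming GNU-as CFI semantics and a start-function pointer in r9 (illustrative, not code from this commit):

fn threadRoot() callconv(.Naked) noreturn {
    asm volatile (
        \\ .cfi_undefined %%rip // no caller: stop unwinding at this frame
        \\ xorl %%ebp,%%ebp     // also terminate the frame-pointer chain
        \\ callq *%%r9          // hypothetical start function pointer in r9
        \\ movl $60,%%eax       // SYS_exit
        \\ syscall
    );
}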
lib/std/leb128.zig
@@ -125,7 +125,7 @@ pub const readILEB128 = readIleb128;
pub fn writeIleb128(writer: anytype, arg: anytype) !void {
const Arg = @TypeOf(arg);
const Int = switch (Arg) {
- comptime_int => std.math.IntFittingRange(-arg - 1, arg),
+ comptime_int => std.math.IntFittingRange(-@abs(arg), @abs(arg)),
else => Arg,
};
const Signed = if (@typeInfo(Int).Int.bits < 8) i8 else Int;
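For a comptime_int argument the old bounds (-arg - 1, arg) are reversed, and therefore invalid, whenever arg is negative; (-@abs(arg), @abs(arg)) always yields a symmetric range containing arg. A hedged illustration of the types IntFittingRange then picks (these asserts are not part of the commit):

const std = @import("std");

comptime {
    // arg = -200: old bounds would have been (199, -200); new bounds are (-200, 200).
    std.debug.assert(std.math.IntFittingRange(-200, 200) == i9);
    // arg = 100: either formulation yields the same signed type.
    std.debug.assert(std.math.IntFittingRange(-100, 100) == i8);
}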
lib/std/start.zig
@@ -249,6 +249,7 @@ fn _start() callconv(.Naked) noreturn {
// linker explicitly.
asm volatile (switch (native_arch) {
.x86_64 =>
+ \\ .cfi_undefined %%rip
\\ xorl %%ebp, %%ebp
\\ movq %%rsp, %%rdi
\\ andq $-16, %%rsp
src/arch/x86_64/bits.zig
@@ -371,7 +371,7 @@ pub const Register = enum(u7) {
.x87 => 33 + @as(u6, reg.enc()),
.mmx => 41 + @as(u6, reg.enc()),
.segment => 50 + @as(u6, reg.enc()),
- .ip => unreachable,
+ .ip => 16,
};
}
};
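Previously .ip had no DWARF number; 16 is the return-address column in the System V x86-64 psABI, which debuggers and unwinders treat as the instruction pointer. A hedged restatement of the general-purpose part of that numbering (gpDwarfNum is a hypothetical helper, not the backend's API):

const std = @import("std");

fn gpDwarfNum(name: []const u8) ?u7 {
    // SysV x86-64 psABI DWARF register numbers 0..16.
    const order = [_][]const u8{
        "rax", "rdx", "rcx", "rbx", "rsi", "rdi", "rbp", "rsp",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    };
    for (order, 0..) |reg, i| if (std.mem.eql(u8, name, reg)) return @as(u7, @intCast(i));
    if (std.mem.eql(u8, name, "rip")) return 16; // return-address column
    return null;
}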
src/arch/x86_64/CodeGen.zig
@@ -1491,6 +1491,46 @@ fn asmPseudo(self: *Self, ops: Mir.Inst.Ops) !void {
});
}
+fn asmPseudoRegister(self: *Self, ops: Mir.Inst.Ops, reg: Register) !void {
+ assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
+ std.mem.endsWith(u8, @tagName(ops), "_r"));
+ _ = try self.addInst(.{
+ .tag = .pseudo,
+ .ops = ops,
+ .data = .{ .r = .{ .r1 = reg } },
+ });
+}
+
+fn asmPseudoImmediate(self: *Self, ops: Mir.Inst.Ops, imm: Immediate) !void {
+ assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
+ std.mem.endsWith(u8, @tagName(ops), "_i_s"));
+ _ = try self.addInst(.{
+ .tag = .pseudo,
+ .ops = ops,
+ .data = .{ .i = .{ .i = @bitCast(imm.signed) } },
+ });
+}
+
+fn asmPseudoRegisterRegister(self: *Self, ops: Mir.Inst.Ops, reg1: Register, reg2: Register) !void {
+ assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
+ std.mem.endsWith(u8, @tagName(ops), "_rr"));
+ _ = try self.addInst(.{
+ .tag = .pseudo,
+ .ops = ops,
+ .data = .{ .rr = .{ .r1 = reg1, .r2 = reg2 } },
+ });
+}
+
+fn asmPseudoRegisterImmediate(self: *Self, ops: Mir.Inst.Ops, reg: Register, imm: Immediate) !void {
+ assert(std.mem.startsWith(u8, @tagName(ops), "pseudo_") and
+ std.mem.endsWith(u8, @tagName(ops), "_ri_s"));
+ _ = try self.addInst(.{
+ .tag = .pseudo,
+ .ops = ops,
+ .data = .{ .ri = .{ .r1 = reg, .i = @bitCast(imm.signed) } },
+ });
+}
+
fn asmRegister(self: *Self, tag: Mir.Inst.FixedTag, reg: Register) !void {
_ = try self.addInst(.{
.tag = tag[1],
@@ -1877,7 +1917,10 @@ fn gen(self: *Self) InnerError!void {
const cc = abi.resolveCallingConvention(fn_info.cc, self.target.*);
if (cc != .Naked) {
try self.asmRegister(.{ ._, .push }, .rbp);
+ try self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, Immediate.s(8));
+ try self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, .rbp, Immediate.s(0));
try self.asmRegisterRegister(.{ ._, .mov }, .rbp, .rsp);
+ try self.asmPseudoRegister(.pseudo_cfi_def_cfa_register_r, .rbp);
const backpatch_push_callee_preserved_regs = try self.asmPlaceholder();
const backpatch_frame_align = try self.asmPlaceholder();
const backpatch_frame_align_extra = try self.asmPlaceholder();
@@ -1962,6 +2005,7 @@ fn gen(self: *Self) InnerError!void {
const backpatch_stack_dealloc = try self.asmPlaceholder();
const backpatch_pop_callee_preserved_regs = try self.asmPlaceholder();
try self.asmRegister(.{ ._, .pop }, .rbp);
+ try self.asmPseudoRegisterImmediate(.pseudo_cfi_def_cfa_ri_s, .rsp, Immediate.s(8));
try self.asmOpOnly(.{ ._, .ret });
const frame_layout = try self.computeFrameLayout(cc);
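These pseudo instructions make the emitted unwind info follow the canonical frame address (CFA) through the standard prologue and epilogue. In assembler-directive terms the generated sequence corresponds roughly to the following sketch (assuming GNU-as CFI semantics; not literal backend output):

fn framedPrologueSketch() callconv(.Naked) void {
    asm volatile (
        \\ pushq %%rbp                 // rsp -= 8
        \\ .cfi_adjust_cfa_offset 8    // CFA moves from rsp+8 to rsp+16
        \\ .cfi_rel_offset %%rbp, 0    // old rbp saved at the current rsp
        \\ movq %%rsp,%%rbp
        \\ .cfi_def_cfa_register %%rbp // CFA now tracked through rbp
        \\ popq %%rbp
        \\ .cfi_def_cfa %%rsp, 8       // epilogue: CFA is rsp+8 again before ret
        \\ retq
    );
}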
@@ -14038,7 +14082,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
var mnem_it = mem.tokenizeAny(u8, line, " \t");
var prefix: Instruction.Prefix = .none;
const mnem_str = while (mnem_it.next()) |mnem_str| {
- if (mem.startsWith(u8, mnem_str, "#")) continue :next_line;
+ if (mnem_str[0] == '#') continue :next_line;
if (mem.startsWith(u8, mnem_str, "//")) continue :next_line;
if (std.meta.stringToEnum(Instruction.Prefix, mnem_str)) |pre| {
if (prefix != .none) return self.fail("extra prefix: '{s}'", .{mnem_str});
@@ -14063,8 +14107,14 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
}
label_gop.value_ptr.target = @intCast(self.mir_instructions.len);
} else continue;
+ if (mnem_str[0] == '.') {
+ if (prefix != .none) return self.fail("prefixed directive: '{s} {s}'", .{ @tagName(prefix), mnem_str });
+ prefix = .directive;
+ }
- var mnem_size: ?Memory.Size = if (mem.endsWith(u8, mnem_str, "b"))
+ var mnem_size: ?Memory.Size = if (prefix == .directive)
+ null
+ else if (mem.endsWith(u8, mnem_str, "b"))
.byte
else if (mem.endsWith(u8, mnem_str, "w"))
.word
@@ -14095,7 +14145,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
mnem_size = fixed_mnem_size;
}
const mnem_name = @tagName(mnem_tag);
- const mnem_fixed_tag: Mir.Inst.FixedTag = for (std.enums.values(Mir.Inst.Fixes)) |fixes| {
+ const mnem_fixed_tag: Mir.Inst.FixedTag = if (prefix == .directive)
+ .{ ._, .pseudo }
+ else for (std.enums.values(Mir.Inst.Fixes)) |fixes| {
const fixes_name = @tagName(fixes);
const space_i = mem.indexOfScalar(u8, fixes_name, ' ');
const fixes_prefix = if (space_i) |i|
@@ -14116,7 +14168,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else {
assert(prefix != .none); // no combination of fixes produced a known mnemonic
return self.fail("invalid prefix for mnemonic: '{s} {s}'", .{
- @tagName(prefix), mnem_str,
+ @tagName(prefix), mnem_name,
});
};
@@ -14324,7 +14376,62 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
} else return self.fail("invalid operand: '{s}'", .{op_str});
} else if (op_it.next()) |op_str| return self.fail("extra operand: '{s}'", .{op_str});
- (switch (ops[0]) {
+ (if (prefix == .directive) switch (mnem_tag) {
+ .@".cfi_def_cfa" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
+ self.asmPseudoRegisterImmediate(.pseudo_cfi_def_cfa_ri_s, ops[0].reg, ops[1].imm)
+ else
+ error.InvalidInstruction,
+ .@".cfi_def_cfa_register" => if (ops[0] == .reg and ops[1] == .none)
+ self.asmPseudoRegister(.pseudo_cfi_def_cfa_register_r, ops[0].reg)
+ else
+ error.InvalidInstruction,
+ .@".cfi_def_cfa_offset" => if (ops[0] == .imm and ops[1] == .none)
+ self.asmPseudoImmediate(.pseudo_cfi_def_cfa_offset_i_s, ops[0].imm)
+ else
+ error.InvalidInstruction,
+ .@".cfi_adjust_cfa_offset" => if (ops[0] == .imm and ops[1] == .none)
+ self.asmPseudoImmediate(.pseudo_cfi_adjust_cfa_offset_i_s, ops[0].imm)
+ else
+ error.InvalidInstruction,
+ .@".cfi_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
+ self.asmPseudoRegisterImmediate(.pseudo_cfi_offset_ri_s, ops[0].reg, ops[1].imm)
+ else
+ error.InvalidInstruction,
+ .@".cfi_val_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
+ self.asmPseudoRegisterImmediate(.pseudo_cfi_val_offset_ri_s, ops[0].reg, ops[1].imm)
+ else
+ error.InvalidInstruction,
+ .@".cfi_rel_offset" => if (ops[0] == .reg and ops[1] == .imm and ops[2] == .none)
+ self.asmPseudoRegisterImmediate(.pseudo_cfi_rel_offset_ri_s, ops[0].reg, ops[1].imm)
+ else
+ error.InvalidInstruction,
+ .@".cfi_register" => if (ops[0] == .reg and ops[1] == .reg and ops[2] == .none)
+ self.asmPseudoRegisterRegister(.pseudo_cfi_register_rr, ops[0].reg, ops[1].reg)
+ else
+ error.InvalidInstruction,
+ .@".cfi_restore" => if (ops[0] == .reg and ops[1] == .none)
+ self.asmPseudoRegister(.pseudo_cfi_restore_r, ops[0].reg)
+ else
+ error.InvalidInstruction,
+ .@".cfi_undefined" => if (ops[0] == .reg and ops[1] == .none)
+ self.asmPseudoRegister(.pseudo_cfi_undefined_r, ops[0].reg)
+ else
+ error.InvalidInstruction,
+ .@".cfi_same_value" => if (ops[0] == .reg and ops[1] == .none)
+ self.asmPseudoRegister(.pseudo_cfi_same_value_r, ops[0].reg)
+ else
+ error.InvalidInstruction,
+ .@".cfi_remember_state" => if (ops[0] == .none)
+ self.asmPseudo(.pseudo_cfi_remember_state_none)
+ else
+ error.InvalidInstruction,
+ .@".cfi_restore_state" => if (ops[0] == .none)
+ self.asmPseudo(.pseudo_cfi_restore_state_none)
+ else
+ error.InvalidInstruction,
+ .@".cfi_escape" => error.InvalidInstruction,
+ else => unreachable,
+ } else switch (ops[0]) {
.none => self.asmOpOnly(mnem_fixed_tag),
.reg => |reg0| switch (ops[1]) {
.none => self.asmRegister(mnem_fixed_tag, reg0),
@@ -19210,14 +19317,6 @@ fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError {
return error.CodegenFail;
}
-fn failSymbol(self: *Self, comptime format: []const u8, args: anytype) InnerError {
- @branchHint(.cold);
- assert(self.err_msg == null);
- const gpa = self.gpa;
- self.err_msg = try ErrorMsg.create(gpa, self.src_loc, format, args);
- return error.CodegenFail;
-}
-
fn parseRegName(name: []const u8) ?Register {
if (@hasDecl(Register, "parseRegName")) {
return Register.parseRegName(name);
src/arch/x86_64/Emit.zig
@@ -30,6 +30,59 @@ pub fn emitMir(emit: *Emit) Error!void {
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
+ if (lowered_inst.prefix == .directive) {
+ switch (emit.debug_output) {
+ .dwarf => |dwarf| switch (lowered_inst.encoding.mnemonic) {
+ .@".cfi_def_cfa" => try dwarf.genDebugFrame(start_offset, .{ .def_cfa = .{
+ .reg = lowered_inst.ops[0].reg.dwarfNum(),
+ .off = lowered_inst.ops[1].imm.signed,
+ } }),
+ .@".cfi_def_cfa_register" => try dwarf.genDebugFrame(start_offset, .{
+ .def_cfa_register = lowered_inst.ops[0].reg.dwarfNum(),
+ }),
+ .@".cfi_def_cfa_offset" => try dwarf.genDebugFrame(start_offset, .{
+ .def_cfa_offset = lowered_inst.ops[0].imm.signed,
+ }),
+ .@".cfi_adjust_cfa_offset" => try dwarf.genDebugFrame(start_offset, .{
+ .adjust_cfa_offset = lowered_inst.ops[0].imm.signed,
+ }),
+ .@".cfi_offset" => try dwarf.genDebugFrame(start_offset, .{ .offset = .{
+ .reg = lowered_inst.ops[0].reg.dwarfNum(),
+ .off = lowered_inst.ops[1].imm.signed,
+ } }),
+ .@".cfi_val_offset" => try dwarf.genDebugFrame(start_offset, .{ .val_offset = .{
+ .reg = lowered_inst.ops[0].reg.dwarfNum(),
+ .off = lowered_inst.ops[1].imm.signed,
+ } }),
+ .@".cfi_rel_offset" => try dwarf.genDebugFrame(start_offset, .{ .rel_offset = .{
+ .reg = lowered_inst.ops[0].reg.dwarfNum(),
+ .off = lowered_inst.ops[1].imm.signed,
+ } }),
+ .@".cfi_register" => try dwarf.genDebugFrame(start_offset, .{ .register = .{
+ lowered_inst.ops[0].reg.dwarfNum(),
+ lowered_inst.ops[1].reg.dwarfNum(),
+ } }),
+ .@".cfi_restore" => try dwarf.genDebugFrame(start_offset, .{
+ .restore = lowered_inst.ops[0].reg.dwarfNum(),
+ }),
+ .@".cfi_undefined" => try dwarf.genDebugFrame(start_offset, .{
+ .undefined = lowered_inst.ops[0].reg.dwarfNum(),
+ }),
+ .@".cfi_same_value" => try dwarf.genDebugFrame(start_offset, .{
+ .same_value = lowered_inst.ops[0].reg.dwarfNum(),
+ }),
+ .@".cfi_remember_state" => try dwarf.genDebugFrame(start_offset, .remember_state),
+ .@".cfi_restore_state" => try dwarf.genDebugFrame(start_offset, .restore_state),
+ .@".cfi_escape" => try dwarf.genDebugFrame(start_offset, .{
+ .escape = lowered_inst.ops[0].bytes,
+ }),
+ else => unreachable,
+ },
+ .plan9 => {},
+ .none => {},
+ }
+ continue;
+ }
try lowered_inst.encode(emit.code.writer(), .{});
const end_offset: u32 = @intCast(emit.code.items.len);
while (lowered_relocs.len > 0 and
src/arch/x86_64/encoder.zig
@@ -25,6 +25,7 @@ pub const Instruction = struct {
repz,
repne,
repnz,
+ directive,
};
pub const Immediate = union(enum) {
@@ -180,6 +181,7 @@ pub const Instruction = struct {
reg: Register,
mem: Memory,
imm: Immediate,
+ bytes: []const u8,
/// Returns the bitsize of the operand.
pub fn bitSize(op: Operand) u64 {
@@ -188,6 +190,7 @@ pub const Instruction = struct {
.reg => |reg| reg.bitSize(),
.mem => |mem| mem.bitSize(),
.imm => unreachable,
+ .bytes => unreachable,
};
}
@@ -199,6 +202,7 @@ pub const Instruction = struct {
.reg => |reg| reg.class() == .segment,
.mem => |mem| mem.isSegmentRegister(),
.imm => unreachable,
+ .bytes => unreachable,
};
}
@@ -207,6 +211,7 @@ pub const Instruction = struct {
.none, .imm => false,
.reg => |reg| reg.isExtended(),
.mem => |mem| mem.base().isExtended(),
+ .bytes => unreachable,
};
}
@@ -214,6 +219,7 @@ pub const Instruction = struct {
return switch (op) {
.none, .reg, .imm => false,
.mem => |mem| if (mem.scaleIndex()) |si| si.index.isExtended() else false,
+ .bytes => unreachable,
};
}
@@ -299,6 +305,7 @@ pub const Instruction = struct {
if (imms < 0) try writer.writeByte('-');
try writer.print("0x{x}", .{@abs(imms)});
} else try writer.print("0x{x}", .{imm.asUnsigned(enc_op.immBitSize())}),
+ .bytes => unreachable,
}
}
@@ -308,20 +315,39 @@ pub const Instruction = struct {
};
pub fn new(prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand) !Instruction {
- const encoding = (try Encoding.findByMnemonic(prefix, mnemonic, ops)) orelse {
- log.err("no encoding found for: {s} {s} {s} {s} {s} {s}", .{
- @tagName(prefix),
- @tagName(mnemonic),
- @tagName(if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none),
- @tagName(if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none),
- @tagName(if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none),
- @tagName(if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none),
- });
- return error.InvalidInstruction;
+ const encoding: Encoding = switch (prefix) {
+ else => (try Encoding.findByMnemonic(prefix, mnemonic, ops)) orelse {
+ log.err("no encoding found for: {s} {s} {s} {s} {s} {s}", .{
+ @tagName(prefix),
+ @tagName(mnemonic),
+ @tagName(if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none),
+ @tagName(if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none),
+ @tagName(if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none),
+ @tagName(if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none),
+ });
+ return error.InvalidInstruction;
+ },
+ .directive => .{
+ .mnemonic = mnemonic,
+ .data = .{
+ .op_en = .zo,
+ .ops = .{
+ if (ops.len > 0) Encoding.Op.fromOperand(ops[0]) else .none,
+ if (ops.len > 1) Encoding.Op.fromOperand(ops[1]) else .none,
+ if (ops.len > 2) Encoding.Op.fromOperand(ops[2]) else .none,
+ if (ops.len > 3) Encoding.Op.fromOperand(ops[3]) else .none,
+ },
+ .opc_len = 0,
+ .opc = undefined,
+ .modrm_ext = 0,
+ .mode = .none,
+ .feature = .none,
+ },
+ },
};
log.debug("selected encoding: {}", .{encoding});
- var inst = Instruction{
+ var inst: Instruction = .{
.prefix = prefix,
.encoding = encoding,
.ops = [1]Operand{.none} ** 4,
@@ -338,7 +364,10 @@ pub const Instruction = struct {
) @TypeOf(writer).Error!void {
_ = unused_format_string;
_ = options;
- if (inst.prefix != .none) try writer.print("{s} ", .{@tagName(inst.prefix)});
+ switch (inst.prefix) {
+ .none, .directive => {},
+ else => try writer.print("{s} ", .{@tagName(inst.prefix)}),
+ }
try writer.print("{s}", .{@tagName(inst.encoding.mnemonic)});
for (inst.ops, inst.encoding.data.ops, 0..) |op, enc, i| {
if (op == .none) break;
@@ -349,6 +378,7 @@ pub const Instruction = struct {
}
pub fn encode(inst: Instruction, writer: anytype, comptime opts: Options) !void {
+ assert(inst.prefix != .directive);
const encoder = Encoder(@TypeOf(writer), opts){ .writer = writer };
const enc = inst.encoding;
const data = enc.data;
@@ -435,6 +465,7 @@ pub const Instruction = struct {
.lock => legacy.prefix_f0 = true,
.repne, .repnz => legacy.prefix_f2 = true,
.rep, .repe, .repz => legacy.prefix_f3 = true,
+ .directive => unreachable,
}
switch (data.mode) {
src/arch/x86_64/Encoding.zig
@@ -220,6 +220,21 @@ pub fn format(
}
pub const Mnemonic = enum {
+ // Directives
+ @".cfi_def_cfa",
+ @".cfi_def_cfa_register",
+ @".cfi_def_cfa_offset",
+ @".cfi_adjust_cfa_offset",
+ @".cfi_offset",
+ @".cfi_val_offset",
+ @".cfi_rel_offset",
+ @".cfi_register",
+ @".cfi_restore",
+ @".cfi_undefined",
+ @".cfi_same_value",
+ @".cfi_remember_state",
+ @".cfi_restore_state",
+ @".cfi_escape",
// zig fmt: off
// General-purpose
adc, add, @"and",
@@ -442,6 +457,7 @@ pub const Op = enum {
imm8s, imm16s, imm32s,
al, ax, eax, rax,
cl,
+ rip, eip, ip,
r8, r16, r32, r64,
rm8, rm16, rm32, rm64,
r32_m8, r32_m16, r64_m16,
@@ -487,7 +503,12 @@ pub const Op = enum {
256 => .ymm,
else => unreachable,
},
- .ip => unreachable,
+ .ip => switch (reg) {
+ .rip => .rip,
+ .eip => .eip,
+ .ip => .ip,
+ else => unreachable,
+ },
},
.mem => |mem| switch (mem) {
@@ -531,13 +552,15 @@ pub const Op = enum {
else
.imm64,
},
+
+ .bytes => unreachable,
};
}
pub fn immBitSize(op: Op) u64 {
return switch (op) {
.none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable,
- .al, .cl, .r8, .rm8, .r32_m8 => unreachable,
+ .al, .cl, .rip, .eip, .ip, .r8, .rm8, .r32_m8 => unreachable,
.ax, .r16, .rm16 => unreachable,
.eax, .r32, .rm32, .r32_m16 => unreachable,
.rax, .r64, .rm64, .r64_m16 => unreachable,
@@ -560,9 +583,9 @@ pub const Op = enum {
.rel8, .rel16, .rel32 => unreachable,
.m8, .m16, .m32, .m64, .m80, .m128, .m256 => unreachable,
.al, .cl, .r8, .rm8 => 8,
- .ax, .r16, .rm16 => 16,
- .eax, .r32, .rm32, .r32_m8, .r32_m16 => 32,
- .rax, .r64, .rm64, .r64_m16, .mm, .mm_m64 => 64,
+ .ax, .ip, .r16, .rm16 => 16,
+ .eax, .eip, .r32, .rm32, .r32_m8, .r32_m16 => 32,
+ .rax, .rip, .r64, .rm64, .r64_m16, .mm, .mm_m64 => 64,
.st => 80,
.xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => 128,
.ymm, .ymm_m256 => 256,
@@ -574,7 +597,7 @@ pub const Op = enum {
.none, .o16, .o32, .o64, .moffs, .m, .sreg => unreachable,
.unity, .imm8, .imm8s, .imm16, .imm16s, .imm32, .imm32s, .imm64 => unreachable,
.rel8, .rel16, .rel32 => unreachable,
- .al, .cl, .r8, .ax, .r16, .eax, .r32, .rax, .r64 => unreachable,
+ .al, .cl, .r8, .ax, .ip, .r16, .eax, .eip, .r32, .rax, .rip, .r64 => unreachable,
.st, .mm, .xmm0, .xmm, .ymm => unreachable,
.m8, .rm8, .r32_m8, .xmm_m8 => 8,
.m16, .rm16, .r32_m16, .r64_m16, .xmm_m16 => 16,
@@ -602,8 +625,9 @@ pub const Op = enum {
pub fn isRegister(op: Op) bool {
// zig fmt: off
return switch (op) {
- .cl,
.al, .ax, .eax, .rax,
+ .cl,
+ .ip, .eip, .rip,
.r8, .r16, .r32, .r64,
.rm8, .rm16, .rm32, .rm64,
.r32_m8, .r32_m16, .r64_m16,
@@ -664,6 +688,7 @@ pub const Op = enum {
.mm, .mm_m64 => .mmx,
.xmm0, .xmm, .xmm_m8, .xmm_m16, .xmm_m32, .xmm_m64, .xmm_m128 => .sse,
.ymm, .ymm_m256 => .sse,
+ .rip, .eip, .ip => .ip,
};
}
src/arch/x86_64/Lower.zig
@@ -12,7 +12,7 @@ src_loc: Zcu.LazySrcLoc,
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
- std.mem.max(usize, &.{
+ @max(
1, // non-pseudo instructions
3, // (ELF only) TLS local dynamic (LD) sequence in PIC mode
2, // cmovcc: cmovcc \ cmovcc
@@ -22,18 +22,18 @@ result_insts: [
pseudo_probe_adjust_unrolled_max_insts,
pseudo_probe_adjust_setup_insts,
pseudo_probe_adjust_loop_insts,
- abi.Win64.callee_preserved_regs.len, // push_regs/pop_regs
- abi.SysV.callee_preserved_regs.len, // push_regs/pop_regs
- })
+ abi.Win64.callee_preserved_regs.len * 2, // push_regs/pop_regs
+ abi.SysV.callee_preserved_regs.len * 2, // push_regs/pop_regs
+ )
]Instruction = undefined,
result_relocs: [
- std.mem.max(usize, &.{
+ @max(
1, // jmp/jcc/call/mov/lea: jmp/jcc/call/mov/lea
2, // jcc: jcc \ jcc
2, // test \ jcc \ probe \ sub \ jmp
1, // probe \ sub \ jcc
3, // (ELF only) TLS local dynamic (LD) sequence in PIC mode
- })
+ )
]Reloc = undefined,
pub const pseudo_probe_align_insts = 5; // test \ jcc \ probe \ sub \ jmp
@@ -265,6 +265,50 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_push_reg_list => try lower.pushPopRegList(.push, inst),
.pseudo_pop_reg_list => try lower.pushPopRegList(.pop, inst),
+ .pseudo_cfi_def_cfa_ri_s => try lower.emit(.directive, .@".cfi_def_cfa", &.{
+ .{ .reg = inst.data.ri.r1 },
+ .{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
+ }),
+ .pseudo_cfi_def_cfa_register_r => try lower.emit(.directive, .@".cfi_def_cfa_register", &.{
+ .{ .reg = inst.data.r.r1 },
+ }),
+ .pseudo_cfi_def_cfa_offset_i_s => try lower.emit(.directive, .@".cfi_def_cfa_offset", &.{
+ .{ .imm = lower.imm(.i_s, inst.data.i.i) },
+ }),
+ .pseudo_cfi_adjust_cfa_offset_i_s => try lower.emit(.directive, .@".cfi_adjust_cfa_offset", &.{
+ .{ .imm = lower.imm(.i_s, inst.data.i.i) },
+ }),
+ .pseudo_cfi_offset_ri_s => try lower.emit(.directive, .@".cfi_offset", &.{
+ .{ .reg = inst.data.ri.r1 },
+ .{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
+ }),
+ .pseudo_cfi_val_offset_ri_s => try lower.emit(.directive, .@".cfi_val_offset", &.{
+ .{ .reg = inst.data.ri.r1 },
+ .{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
+ }),
+ .pseudo_cfi_rel_offset_ri_s => try lower.emit(.directive, .@".cfi_rel_offset", &.{
+ .{ .reg = inst.data.ri.r1 },
+ .{ .imm = lower.imm(.ri_s, inst.data.ri.i) },
+ }),
+ .pseudo_cfi_register_rr => try lower.emit(.directive, .@".cfi_register", &.{
+ .{ .reg = inst.data.rr.r1 },
+ .{ .reg = inst.data.rr.r2 },
+ }),
+ .pseudo_cfi_restore_r => try lower.emit(.directive, .@".cfi_restore", &.{
+ .{ .reg = inst.data.r.r1 },
+ }),
+ .pseudo_cfi_undefined_r => try lower.emit(.directive, .@".cfi_undefined", &.{
+ .{ .reg = inst.data.r.r1 },
+ }),
+ .pseudo_cfi_same_value_r => try lower.emit(.directive, .@".cfi_same_value", &.{
+ .{ .reg = inst.data.r.r1 },
+ }),
+ .pseudo_cfi_remember_state_none => try lower.emit(.directive, .@".cfi_remember_state", &.{}),
+ .pseudo_cfi_restore_state_none => try lower.emit(.directive, .@".cfi_restore_state", &.{}),
+ .pseudo_cfi_escape_bytes => try lower.emit(.directive, .@".cfi_escape", &.{
+ .{ .bytes = inst.data.bytes.get(lower.mir) },
+ }),
+
.pseudo_dbg_prologue_end_none,
.pseudo_dbg_line_line_column,
.pseudo_dbg_epilogue_begin_none,
@@ -280,6 +324,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
.pseudo_dbg_local_af,
.pseudo_dbg_local_am,
.pseudo_dbg_var_args_none,
+
.pseudo_dead_none,
=> {},
else => unreachable,
@@ -665,12 +710,43 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
fn pushPopRegList(lower: *Lower, comptime mnemonic: Mnemonic, inst: Mir.Inst) Error!void {
const callee_preserved_regs = abi.getCalleePreservedRegs(lower.cc);
- var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) {
- .push => .reverse,
- .pop => .forward,
+ var off: i32 = switch (mnemonic) {
+ .push => 0,
+ .pop => undefined,
else => unreachable,
- } });
- while (it.next()) |i| try lower.emit(.none, mnemonic, &.{.{ .reg = callee_preserved_regs[i] }});
+ };
+ {
+ var it = inst.data.reg_list.iterator(.{ .direction = switch (mnemonic) {
+ .push => .reverse,
+ .pop => .forward,
+ else => unreachable,
+ } });
+ while (it.next()) |i| {
+ try lower.emit(.none, mnemonic, &.{.{
+ .reg = callee_preserved_regs[i],
+ }});
+ switch (mnemonic) {
+ .push => off -= 8,
+ .pop => {},
+ else => unreachable,
+ }
+ }
+ }
+ switch (mnemonic) {
+ .push => {
+ var it = inst.data.reg_list.iterator(.{});
+ while (it.next()) |i| {
+ try lower.emit(.directive, .@".cfi_rel_offset", &.{
+ .{ .reg = callee_preserved_regs[i] },
+ .{ .imm = Immediate.s(off) },
+ });
+ off += 8;
+ }
+ assert(off == 0);
+ },
+ .pop => {},
+ else => unreachable,
+ }
}
const page_size: i32 = 1 << 12;
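pushPopRegList now pairs the callee-saved pushes with one .cfi_rel_offset per register, each offset given relative to the CFA register, which is already rbp by the time these pushes are emitted (see gen() above). If, for illustration only, rbx and r12 were the live callee-saved registers, the lowered fragment would correspond to:

// Illustrative lowering; the actual register set depends on the calling
// convention and on which registers the function clobbers.
const lowered_fragment =
    \\ pushq %r12
    \\ pushq %rbx
    \\ .cfi_rel_offset %rbx, -16 // rbx saved 16 bytes below rbp
    \\ .cfi_rel_offset %r12, -8  // r12 saved 8 bytes below rbp
;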
src/arch/x86_64/Mir.zig
@@ -879,6 +879,7 @@ pub const Inst = struct {
/// Probe adjust loop
/// Uses `rr` payload.
pseudo_probe_adjust_loop_rr,
+
/// Push registers
/// Uses `reg_list` payload.
pseudo_push_reg_list,
@@ -886,6 +887,47 @@ pub const Inst = struct {
/// Uses `reg_list` payload.
pseudo_pop_reg_list,
+ /// Define cfa rule as offset from register.
+ /// Uses `ri` payload.
+ pseudo_cfi_def_cfa_ri_s,
+ /// Modify cfa rule register.
+ /// Uses `r` payload.
+ pseudo_cfi_def_cfa_register_r,
+ /// Modify cfa rule offset.
+ /// Uses `i` payload.
+ pseudo_cfi_def_cfa_offset_i_s,
+ /// Offset cfa rule offset.
+ /// Uses `i` payload.
+ pseudo_cfi_adjust_cfa_offset_i_s,
+ /// Define register rule as stored at offset from cfa.
+ /// Uses `ri` payload.
+ pseudo_cfi_offset_ri_s,
+ /// Define register rule as offset from cfa.
+ /// Uses `ri` payload.
+ pseudo_cfi_val_offset_ri_s,
+ /// Define register rule as stored at offset from cfa rule register.
+ /// Uses `ri` payload.
+ pseudo_cfi_rel_offset_ri_s,
+ /// Define register rule as register.
+ /// Uses `rr` payload.
+ pseudo_cfi_register_rr,
+ /// Define register rule from initial.
+ /// Uses `r` payload.
+ pseudo_cfi_restore_r,
+ /// Define register rule as undefined.
+ /// Uses `r` payload.
+ pseudo_cfi_undefined_r,
+ /// Define register rule as itself.
+ /// Uses `r` payload.
+ pseudo_cfi_same_value_r,
+ /// Push cfi state.
+ pseudo_cfi_remember_state_none,
+ /// Pop cfi state.
+ pseudo_cfi_restore_state_none,
+ /// Raw cfi bytes.
+ /// Uses `bytes` payload.
+ pseudo_cfi_escape_bytes,
+
/// End of prologue
pseudo_dbg_prologue_end_none,
/// Update debug line
@@ -1028,8 +1070,13 @@ pub const Inst = struct {
fixes: Fixes = ._,
payload: u32,
},
- ix: struct {
+ bytes: struct {
payload: u32,
+ len: u32,
+
+ pub fn get(bytes: @This(), mir: Mir) []const u8 {
+ return std.mem.sliceAsBytes(mir.extra[bytes.payload..])[0..bytes.len];
+ }
},
a: struct {
air_inst: Air.Inst.Index,
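The new bytes payload stores raw .cfi_escape data in Mir's u32 extra array and reinterprets it on access. A hedged standalone illustration of that packing (not the backend's API):

const std = @import("std");

test "byte data round-trips through a u32 extra array" {
    var extra = [_]u32{0} ** 2;
    const raw = "escape";
    @memcpy(std.mem.sliceAsBytes(extra[0..])[0..raw.len], raw);
    try std.testing.expectEqualSlices(u8, raw, std.mem.sliceAsBytes(extra[0..])[0..raw.len]);
}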
src/link/Elf/relocatable.zig
@@ -289,8 +289,6 @@ fn claimUnresolved(elf_file: *Elf) void {
}
fn initSections(elf_file: *Elf) !void {
- const ptr_size = elf_file.ptrWidthBytes();
-
for (elf_file.objects.items) |index| {
const object = elf_file.file(index).?.object;
try object.initOutputSections(elf_file);
@@ -306,13 +304,18 @@ fn initSections(elf_file: *Elf) !void {
if (elf_file.file(index).?.object.cies.items.len > 0) break true;
} else false;
if (needs_eh_frame) {
- elf_file.eh_frame_section_index = try elf_file.addSection(.{
- .name = try elf_file.insertShString(".eh_frame"),
- .type = elf.SHT_PROGBITS,
- .flags = elf.SHF_ALLOC,
- .addralign = ptr_size,
- .offset = std.math.maxInt(u64),
- });
+ if (elf_file.eh_frame_section_index == null) {
+ elf_file.eh_frame_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".eh_frame"),
+ .type = if (elf_file.getTarget().cpu.arch == .x86_64)
+ elf.SHT_X86_64_UNWIND
+ else
+ elf.SHT_PROGBITS,
+ .flags = elf.SHF_ALLOC,
+ .addralign = elf_file.ptrWidthBytes(),
+ .offset = std.math.maxInt(u64),
+ });
+ }
elf_file.eh_frame_rela_section_index = try elf_file.addRelaShdr(
try elf_file.insertShString(".rela.eh_frame"),
elf_file.eh_frame_section_index.?,
@@ -373,7 +376,11 @@ fn updateSectionSizes(elf_file: *Elf) !void {
}
if (elf_file.eh_frame_section_index) |index| {
- slice.items(.shdr)[index].sh_size = try eh_frame.calcEhFrameSize(elf_file);
+ slice.items(.shdr)[index].sh_size = existing_size: {
+ const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
+ const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
+ break :existing_size sym.atom(elf_file).?.size;
+ } + try eh_frame.calcEhFrameSize(elf_file);
}
if (elf_file.eh_frame_rela_section_index) |index| {
const shdr = &slice.items(.shdr)[index];
@@ -526,17 +533,22 @@ fn writeSyntheticSections(elf_file: *Elf) !void {
}
if (elf_file.eh_frame_section_index) |shndx| {
+ const existing_size = existing_size: {
+ const zo = elf_file.zigObjectPtr() orelse break :existing_size 0;
+ const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
+ break :existing_size sym.atom(elf_file).?.size;
+ };
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
- var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
+ var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrameObject(elf_file, buffer.writer());
log.debug("writing .eh_frame from 0x{x} to 0x{x}", .{
- shdr.sh_offset,
- shdr.sh_offset + shdr.sh_size,
+ shdr.sh_offset + existing_size,
+ shdr.sh_offset + sh_size,
});
- assert(buffer.items.len == sh_size);
- try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ assert(buffer.items.len == sh_size - existing_size);
+ try elf_file.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
}
if (elf_file.eh_frame_rela_section_index) |shndx| {
const shdr = slice.items(.shdr)[shndx];
src/link/Elf/relocation.zig
@@ -108,20 +108,27 @@ pub const dwarf = struct {
pub fn externalRelocType(
target: Symbol,
+ source_section: Dwarf.Section.Index,
address_size: Dwarf.AddressSize,
cpu_arch: std.Target.Cpu.Arch,
) u32 {
return switch (cpu_arch) {
- .x86_64 => @intFromEnum(switch (address_size) {
- .@"32" => if (target.flags.is_tls) elf.R_X86_64.DTPOFF32 else .@"32",
- .@"64" => if (target.flags.is_tls) elf.R_X86_64.DTPOFF64 else .@"64",
- else => unreachable,
- }),
- .riscv64 => @intFromEnum(switch (address_size) {
- .@"32" => elf.R_RISCV.@"32",
- .@"64" => elf.R_RISCV.@"64",
- else => unreachable,
- }),
+ .x86_64 => @intFromEnum(@as(elf.R_X86_64, switch (source_section) {
+ else => switch (address_size) {
+ .@"32" => if (target.flags.is_tls) .DTPOFF32 else .@"32",
+ .@"64" => if (target.flags.is_tls) .DTPOFF64 else .@"64",
+ else => unreachable,
+ },
+ .debug_frame => .PC32,
+ })),
+ .riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
+ else => switch (address_size) {
+ .@"32" => .@"32",
+ .@"64" => .@"64",
+ else => unreachable,
+ },
+ .debug_frame => unreachable,
+ })),
else => @panic("TODO unhandled cpu arch"),
};
}
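Frame-section pointers in .eh_frame are conventionally emitted with the DW_EH_PE_pcrel | DW_EH_PE_sdata4 encoding, i.e. signed 32-bit PC-relative values, which is why external relocations sourced from the frame section now use R_X86_64_PC32 rather than an absolute type. A hedged restatement of those standard encoding constants (not code from this commit):

const DW_EH_PE_pcrel: u8 = 0x10;
const DW_EH_PE_sdata4: u8 = 0x0b;
const fde_ptr_encoding: u8 = DW_EH_PE_pcrel | DW_EH_PE_sdata4; // 0x1b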
src/link/Elf/ZigObject.zig
@@ -49,6 +49,7 @@ debug_line_section_dirty: bool = false,
debug_line_str_section_dirty: bool = false,
debug_loclists_section_dirty: bool = false,
debug_rnglists_section_dirty: bool = false,
+eh_frame_section_dirty: bool = false,
debug_info_index: ?Symbol.Index = null,
debug_abbrev_index: ?Symbol.Index = null,
@@ -58,6 +59,7 @@ debug_line_index: ?Symbol.Index = null,
debug_line_str_index: ?Symbol.Index = null,
debug_loclists_index: ?Symbol.Index = null,
debug_rnglists_index: ?Symbol.Index = null,
+eh_frame_index: ?Symbol.Index = null,
pub const global_symbol_bit: u32 = 0x80000000;
pub const symbol_mask: u32 = 0x7fffffff;
@@ -72,8 +74,6 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
const comp = elf_file.base.comp;
const gpa = comp.gpa;
const ptr_size = elf_file.ptrWidthBytes();
- const target = elf_file.getTarget();
- const ptr_bit_width = target.ptrBitWidth();
try self.atoms.append(gpa, .{ .extra_index = try self.addAtomExtra(gpa, .{}) }); // null input section
try self.relocs.append(gpa, .{}); // null relocs section
@@ -113,7 +113,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
- .addr = if (ptr_bit_width >= 32) 0x4000000 else 0x4000,
+ .addr = if (ptr_size >= 4) 0x4000000 else 0x4000,
.memsz = filesz,
.@"align" = elf_file.page_size,
.flags = elf.PF_X | elf.PF_R | elf.PF_W,
@@ -128,7 +128,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
- .addr = if (ptr_bit_width >= 32) 0xc000000 else 0xa000,
+ .addr = if (ptr_size >= 4) 0xc000000 else 0xa000,
.memsz = filesz,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
@@ -143,7 +143,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
.type = elf.PT_LOAD,
.offset = off,
.filesz = filesz,
- .addr = if (ptr_bit_width >= 32) 0x10000000 else 0xc000,
+ .addr = if (ptr_size >= 4) 0x10000000 else 0xc000,
.memsz = filesz,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
@@ -154,7 +154,7 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
const alignment = elf_file.page_size;
elf_file.phdr_zig_load_zerofill_index = try elf_file.addPhdr(.{
.type = elf.PT_LOAD,
- .addr = if (ptr_bit_width >= 32) 0x14000000 else 0xf000,
+ .addr = if (ptr_size >= 4) 0x14000000 else 0xf000,
.memsz = 1024,
.@"align" = alignment,
.flags = elf.PF_R | elf.PF_W,
@@ -354,6 +354,20 @@ pub fn init(self: *ZigObject, elf_file: *Elf, options: InitOptions) !void {
self.debug_rnglists_index = try addSectionSymbol(self, gpa, ".debug_rnglists", .@"1", elf_file.debug_rnglists_section_index.?);
}
+ if (elf_file.eh_frame_section_index == null) {
+ elf_file.eh_frame_section_index = try elf_file.addSection(.{
+ .name = try elf_file.insertShString(".eh_frame"),
+ .type = if (elf_file.getTarget().cpu.arch == .x86_64)
+ elf.SHT_X86_64_UNWIND
+ else
+ elf.SHT_PROGBITS,
+ .flags = elf.SHF_ALLOC,
+ .addralign = ptr_size,
+ });
+ self.eh_frame_section_dirty = true;
+ self.eh_frame_index = try addSectionSymbol(self, gpa, ".eh_frame", Atom.Alignment.fromNonzeroByteUnits(ptr_size), elf_file.eh_frame_section_index.?);
+ }
+
try dwarf.initMetadata();
self.dwarf = dwarf;
},
@@ -460,6 +474,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
self.debug_line_str_index.?,
self.debug_loclists_index.?,
self.debug_rnglists_index.?,
+ self.eh_frame_index.?,
}, [_]*Dwarf.Section{
&dwarf.debug_info.section,
&dwarf.debug_abbrev.section,
@@ -469,7 +484,18 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
&dwarf.debug_line_str.section,
&dwarf.debug_loclists.section,
&dwarf.debug_rnglists.section,
- }) |sym_index, sect| {
+ &dwarf.debug_frame.section,
+ }, [_]Dwarf.Section.Index{
+ .debug_info,
+ .debug_abbrev,
+ .debug_str,
+ .debug_aranges,
+ .debug_line,
+ .debug_line_str,
+ .debug_loclists,
+ .debug_rnglists,
+ .debug_frame,
+ }) |sym_index, sect, sect_index| {
const sym = self.symbol(sym_index);
const atom_ptr = self.atom(sym.ref.index).?;
if (!atom_ptr.alive) continue;
@@ -509,6 +535,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
for (unit.cross_section_relocs.items) |reloc| {
const target_sym_index = switch (reloc.target_sec) {
.debug_abbrev => self.debug_abbrev_index.?,
+ .debug_aranges => self.debug_aranges_index.?,
+ .debug_frame => self.eh_frame_index.?,
.debug_info => self.debug_info_index.?,
.debug_line => self.debug_line_index.?,
.debug_line_str => self.debug_line_str_index.?,
@@ -547,7 +575,10 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
entry.external_relocs.items.len);
for (entry.cross_entry_relocs.items) |reloc| {
const r_offset = entry_off + reloc.source_off;
- const r_addend: i64 = @intCast(unit.off + reloc.target_off + unit.header_len + unit.getEntry(reloc.target_entry).assertNonEmpty(unit, sect, dwarf).off);
+ const r_addend: i64 = @intCast(unit.off + reloc.target_off + (if (reloc.target_entry.unwrap()) |target_entry|
+ unit.header_len + unit.getEntry(target_entry).assertNonEmpty(unit, sect, dwarf).off
+ else
+ 0));
const r_type = relocation.dwarf.crossSectionRelocType(dwarf.format, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
self.symbol(sym_index).name(elf_file),
@@ -584,6 +615,8 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
for (entry.cross_section_relocs.items) |reloc| {
const target_sym_index = switch (reloc.target_sec) {
.debug_abbrev => self.debug_abbrev_index.?,
+ .debug_aranges => self.debug_aranges_index.?,
+ .debug_frame => self.eh_frame_index.?,
.debug_info => self.debug_info_index.?,
.debug_line => self.debug_line_index.?,
.debug_line_str => self.debug_line_str_index.?,
@@ -617,7 +650,7 @@ pub fn flushModule(self: *ZigObject, elf_file: *Elf, tid: Zcu.PerThread.Id) !voi
const target_sym = self.symbol(reloc.target_sym);
const r_offset = entry_off + reloc.source_off;
const r_addend: i64 = @intCast(reloc.target_off);
- const r_type = relocation.dwarf.externalRelocType(target_sym.*, dwarf.address_size, cpu_arch);
+ const r_type = relocation.dwarf.externalRelocType(target_sym.*, sect_index, dwarf.address_size, cpu_arch);
log.debug(" {s} <- r_off={x}, r_add={x}, r_type={}", .{
target_sym.name(elf_file),
r_offset,
src/link/MachO/DebugSymbols.zig
@@ -105,9 +105,7 @@ pub fn growSection(
const sect = self.getSectionPtr(sect_index);
const allocated_size = self.allocatedSize(sect.offset);
- if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.file.setEndPos(sect.offset + needed_size);
- } else if (needed_size > allocated_size) {
+ if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0; // free the space
const new_offset = try self.findFreeSpace(needed_size, 1);
@@ -130,6 +128,8 @@ pub fn growSection(
}
sect.offset = @intCast(new_offset);
+ } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
+ try self.file.setEndPos(sect.offset + needed_size);
}
sect.size = needed_size;
src/link/Dwarf.zig
@@ -10,6 +10,7 @@ navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, Entry.Index),
debug_abbrev: DebugAbbrev,
debug_aranges: DebugAranges,
+debug_frame: DebugFrame,
debug_info: DebugInfo,
debug_line: DebugLine,
debug_line_str: StringSection,
@@ -73,11 +74,7 @@ const DebugAranges = struct {
section: Section,
fn headerBytes(dwarf: *Dwarf) u32 {
- return std.mem.alignForwardAnyAlign(
- u32,
- dwarf.unitLengthBytes() + 2 + dwarf.sectionOffsetBytes() + 1 + 1,
- @intFromEnum(dwarf.address_size) * 2,
- );
+ return dwarf.unitLengthBytes() + 2 + dwarf.sectionOffsetBytes() + 1 + 1;
}
fn trailerBytes(dwarf: *Dwarf) u32 {
@@ -85,6 +82,47 @@ const DebugAranges = struct {
}
};
+const DebugFrame = struct {
+ header: Header,
+ section: Section,
+
+ const Format = enum { none, debug_frame, eh_frame };
+ const Header = struct {
+ format: Format,
+ code_alignment_factor: u32,
+ data_alignment_factor: i32,
+ return_address_register: u32,
+ initial_instructions: []const Cfa,
+ };
+
+ fn headerBytes(dwarf: *Dwarf) u32 {
+ const target = dwarf.bin_file.comp.root_mod.resolved_target.result;
+ return @intCast(switch (dwarf.debug_frame.header.format) {
+ .none => return 0,
+ .debug_frame => dwarf.unitLengthBytes() + dwarf.sectionOffsetBytes() + 1 + "\x00".len + 1 + 1,
+ .eh_frame => dwarf.unitLengthBytes() + 4 + 1 + "zR\x00".len +
+ uleb128Bytes(1) + 1,
+ } + switch (target.cpu.arch) {
+ .x86_64 => len: {
+ dev.check(.x86_64_backend);
+ const Register = @import("../arch/x86_64/bits.zig").Register;
+ break :len uleb128Bytes(1) + sleb128Bytes(-8) + uleb128Bytes(Register.rip.dwarfNum()) +
+ 1 + uleb128Bytes(Register.rsp.dwarfNum()) + sleb128Bytes(-1) +
+ 1 + uleb128Bytes(1);
+ },
+ else => unreachable,
+ });
+ }
+
+ fn trailerBytes(dwarf: *Dwarf) u32 {
+ return @intCast(switch (dwarf.debug_frame.header.format) {
+ .none => 0,
+ .debug_frame => dwarf.unitLengthBytes() + dwarf.sectionOffsetBytes() + 1 + "\x00".len + 1 + 1 + uleb128Bytes(1) + sleb128Bytes(1) + uleb128Bytes(0),
+ .eh_frame => dwarf.unitLengthBytes() + 4 + 1 + "\x00".len + uleb128Bytes(1) + sleb128Bytes(1) + uleb128Bytes(0),
+ });
+ }
+};
+
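headerBytes sizes a CIE whose initial instructions encode the conventional x86_64 entry state: the CFA is rsp+8 and the return address (rip, DWARF register 16) is saved at CFA-8. Expressed with the Cfa type declared later in this file, those two rules look roughly like this (a hedged restatement, not a literal copy of the emitted bytes):

const x86_64_initial_rules = [_]Cfa{
    .{ .def_cfa = .{ .reg = 7, .off = 8 } }, // rsp is DWARF register 7; CFA = rsp + 8
    .{ .offset = .{ .reg = 16, .off = -8 } }, // return address stored at CFA - 8
};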
const DebugInfo = struct {
section: Section,
@@ -227,8 +265,10 @@ pub const Section = struct {
len: u64,
units: std.ArrayListUnmanaged(Unit),
- const Index = enum {
+ pub const Index = enum {
debug_abbrev,
+ debug_aranges,
+ debug_frame,
debug_info,
debug_line,
debug_line_str,
@@ -259,15 +299,17 @@ pub const Section = struct {
const unit: Unit.Index = @enumFromInt(sec.units.items.len);
const unit_ptr = try sec.units.addOne(dwarf.gpa);
errdefer sec.popUnit(dwarf.gpa);
+ const aligned_header_len: u32 = @intCast(sec.alignment.forward(header_len));
+ const aligned_trailer_len: u32 = @intCast(sec.alignment.forward(trailer_len));
unit_ptr.* = .{
.prev = sec.last,
.next = .none,
.first = .none,
.last = .none,
.off = 0,
- .header_len = header_len,
- .trailer_len = trailer_len,
- .len = header_len + trailer_len,
+ .header_len = aligned_header_len,
+ .trailer_len = aligned_trailer_len,
+ .len = aligned_header_len + aligned_trailer_len,
.entries = .{},
.cross_unit_relocs = .{},
.cross_section_relocs = .{},
@@ -288,8 +330,8 @@ pub const Section = struct {
const unit_ptr = sec.getUnit(unit);
if (unit_ptr.prev.unwrap()) |prev_unit| sec.getUnit(prev_unit).next = unit_ptr.next;
if (unit_ptr.next.unwrap()) |next_unit| sec.getUnit(next_unit).prev = unit_ptr.prev;
- if (sec.first.unwrap().? == unit) sec.first = unit_ptr.next;
- if (sec.last.unwrap().? == unit) sec.last = unit_ptr.prev;
+ if (sec.first == unit.toOptional()) sec.first = unit_ptr.next;
+ if (sec.last == unit.toOptional()) sec.last = unit_ptr.prev;
}
fn popUnit(sec: *Section, gpa: std.mem.Allocator) void {
@@ -303,10 +345,10 @@ pub const Section = struct {
return &sec.units.items[@intFromEnum(unit)];
}
- fn replaceEntry(sec: *Section, unit: Unit.Index, entry: Entry.Index, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
+ fn resizeEntry(sec: *Section, unit: Unit.Index, entry: Entry.Index, dwarf: *Dwarf, len: u32) UpdateError!void {
const unit_ptr = sec.getUnit(unit);
const entry_ptr = unit_ptr.getEntry(entry);
- if (contents.len > 0) {
+ if (len > 0) {
if (entry_ptr.len == 0) {
assert(entry_ptr.prev == .none and entry_ptr.next == .none);
entry_ptr.off = if (unit_ptr.last.unwrap()) |last_entry| off: {
@@ -316,15 +358,27 @@ pub const Section = struct {
} else 0;
entry_ptr.prev = unit_ptr.last;
unit_ptr.last = entry.toOptional();
+ if (unit_ptr.first == .none) unit_ptr.first = unit_ptr.last;
+ if (entry_ptr.prev.unwrap()) |prev_entry| try unit_ptr.getEntry(prev_entry).pad(unit_ptr, sec, dwarf);
}
- try entry_ptr.replace(unit_ptr, sec, dwarf, contents);
+ try entry_ptr.resize(unit_ptr, sec, dwarf, len);
}
- assert(entry_ptr.len == contents.len);
+ assert(entry_ptr.len == len);
+ }
+
+ fn replaceEntry(sec: *Section, unit: Unit.Index, entry: Entry.Index, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
+ try sec.resizeEntry(unit, entry, dwarf, @intCast(contents.len));
+ const unit_ptr = sec.getUnit(unit);
+ try unit_ptr.getEntry(entry).replace(unit_ptr, sec, dwarf, contents);
}
fn resize(sec: *Section, dwarf: *Dwarf, len: u64) UpdateError!void {
+ if (len <= sec.len) return;
if (dwarf.bin_file.cast(.elf)) |elf_file| {
- try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true);
+ if (sec == &dwarf.debug_frame.section)
+ try elf_file.growAllocSection(sec.index, len)
+ else
+ try elf_file.growNonAllocSection(sec.index, len, @intCast(sec.alignment.toByteUnits().?), true);
const shdr = &elf_file.sections.items(.shdr)[sec.index];
sec.off = shdr.sh_offset;
sec.len = shdr.sh_size;
@@ -366,7 +420,7 @@ pub const Section = struct {
}
fn padToIdeal(sec: *Section, actual_size: anytype) @TypeOf(actual_size) {
- return if (sec.pad_to_ideal) Dwarf.padToIdeal(actual_size) else actual_size;
+ return @intCast(sec.alignment.forward(if (sec.pad_to_ideal) Dwarf.padToIdeal(actual_size) else actual_size));
}
};
@@ -562,6 +616,43 @@ const Unit = struct {
} else if (sec == &dwarf.debug_aranges.section) fill: {
trailer.appendNTimesAssumeCapacity(0, @intFromEnum(dwarf.address_size) * 2);
break :fill 0;
+ } else if (sec == &dwarf.debug_frame.section) fill: {
+ switch (dwarf.debug_frame.header.format) {
+ .none => {},
+ .debug_frame, .eh_frame => |format| {
+ const unit_len = len - dwarf.unitLengthBytes();
+ switch (dwarf.format) {
+ .@"32" => std.mem.writeInt(u32, trailer.addManyAsArrayAssumeCapacity(4), @intCast(unit_len), dwarf.endian),
+ .@"64" => {
+ std.mem.writeInt(u32, trailer.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian);
+ std.mem.writeInt(u64, trailer.addManyAsArrayAssumeCapacity(8), unit_len, dwarf.endian);
+ },
+ }
+ switch (format) {
+ .none => unreachable,
+ .debug_frame => {
+ switch (dwarf.format) {
+ .@"32" => std.mem.writeInt(u32, trailer.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian),
+ .@"64" => std.mem.writeInt(u64, trailer.addManyAsArrayAssumeCapacity(8), std.math.maxInt(u64), dwarf.endian),
+ }
+ trailer.appendAssumeCapacity(4);
+ trailer.appendSliceAssumeCapacity("\x00");
+ trailer.appendAssumeCapacity(@intFromEnum(dwarf.address_size));
+ trailer.appendAssumeCapacity(0);
+ },
+ .eh_frame => {
+ std.mem.writeInt(u32, trailer.addManyAsArrayAssumeCapacity(4), 0, dwarf.endian);
+ trailer.appendAssumeCapacity(1);
+ trailer.appendSliceAssumeCapacity("\x00");
+ },
+ }
+ uleb128(trailer.fixedWriter(), 1) catch unreachable;
+ sleb128(trailer.fixedWriter(), 1) catch unreachable;
+ uleb128(trailer.fixedWriter(), 0) catch unreachable;
+ },
+ }
+ trailer.appendNTimesAssumeCapacity(DW.CFA.nop, unit.trailer_len - trailer.items.len);
+ break :fill DW.CFA.nop;
} else if (sec == &dwarf.debug_info.section) fill: {
assert(uleb128Bytes(@intFromEnum(AbbrevCode.null)) == 1);
trailer.appendNTimesAssumeCapacity(@intFromEnum(AbbrevCode.null), 2);
@@ -571,7 +662,7 @@ const Unit = struct {
break :fill DW.RLE.end_of_list;
} else unreachable;
assert(trailer.items.len == unit.trailer_len);
- trailer.appendNTimesAssumeCapacity(fill_byte, len - trailer.items.len);
+ trailer.appendNTimesAssumeCapacity(fill_byte, len - unit.trailer_len);
assert(trailer.items.len == len);
try dwarf.getFile().?.pwriteAll(trailer.items, sec.off + start);
}
@@ -655,6 +746,23 @@ const Entry = struct {
fn pad(entry: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf) UpdateError!void {
assert(entry.len > 0);
const start = entry.off + entry.len;
+ if (sec == &dwarf.debug_frame.section) {
+ const len = if (entry.next.unwrap()) |next_entry|
+ unit.getEntry(next_entry).off - entry.off
+ else
+ entry.len;
+ var unit_len: [8]u8 = undefined;
+ dwarf.writeInt(unit_len[0..dwarf.sectionOffsetBytes()], len - dwarf.unitLengthBytes());
+ try dwarf.getFile().?.pwriteAll(
+ unit_len[0..dwarf.sectionOffsetBytes()],
+ sec.off + unit.off + unit.header_len + entry.off,
+ );
+ const buf = try dwarf.gpa.alloc(u8, len - entry.len);
+ defer dwarf.gpa.free(buf);
+ @memset(buf, DW.CFA.nop);
+ try dwarf.getFile().?.pwriteAll(buf, sec.off + unit.off + unit.header_len + start);
+ return;
+ }
const len = unit.getEntry(entry.next.unwrap() orelse return).off - start;
var buf: [
@max(
@@ -711,18 +819,20 @@ const Entry = struct {
try dwarf.getFile().?.pwriteAll(fbs.getWritten(), sec.off + unit.off + unit.header_len + start);
}
- fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
+ fn resize(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, len: u32) UpdateError!void {
+ assert(len > 0);
+ assert(sec.alignment.check(len));
+ if (entry_ptr.len == len) return;
const end = if (entry_ptr.next.unwrap()) |next_entry|
unit.getEntry(next_entry).off
else
unit.len -| (unit.header_len + unit.trailer_len);
- if (entry_ptr.off + contents.len > end) {
+ if (entry_ptr.off + len > end) {
if (entry_ptr.next.unwrap()) |next_entry| {
- if (entry_ptr.prev.unwrap()) |prev_entry| {
- const prev_entry_ptr = unit.getEntry(prev_entry);
- prev_entry_ptr.next = entry_ptr.next;
- try prev_entry_ptr.pad(unit, sec, dwarf);
- } else unit.first = entry_ptr.next;
+ if (entry_ptr.prev.unwrap()) |prev_entry|
+ unit.getEntry(prev_entry).next = entry_ptr.next
+ else
+ unit.first = entry_ptr.next;
const next_entry_ptr = unit.getEntry(next_entry);
const entry = next_entry_ptr.prev;
next_entry_ptr.prev = entry_ptr.prev;
@@ -733,12 +843,15 @@ const Entry = struct {
entry_ptr.off = last_entry_ptr.off + sec.padToIdeal(last_entry_ptr.len);
unit.last = entry;
}
- try unit.resize(sec, dwarf, 0, @intCast(unit.header_len + entry_ptr.off + sec.padToIdeal(contents.len) + unit.trailer_len));
+ try unit.resize(sec, dwarf, 0, @intCast(unit.header_len + entry_ptr.off + sec.padToIdeal(len) + unit.trailer_len));
}
- entry_ptr.len = @intCast(contents.len);
- if (entry_ptr.prev.unwrap()) |prev_entry| try unit.getEntry(prev_entry).pad(unit, sec, dwarf);
- try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off + unit.header_len + entry_ptr.off);
+ entry_ptr.len = len;
try entry_ptr.pad(unit, sec, dwarf);
+ }
+
+ fn replace(entry_ptr: *Entry, unit: *Unit, sec: *Section, dwarf: *Dwarf, contents: []const u8) UpdateError!void {
+ assert(contents.len == entry_ptr.len);
+ try dwarf.getFile().?.pwriteAll(contents, sec.off + unit.off + unit.header_len + entry_ptr.off);
if (false) {
const buf = try dwarf.gpa.alloc(u8, sec.len);
defer dwarf.gpa.free(buf);
@@ -844,6 +957,22 @@ const Entry = struct {
dwarf.sectionOffsetBytes(),
);
}
+ if (sec == &dwarf.debug_frame.section) switch (DebugFrame.format(dwarf)) {
+ .none, .debug_frame => {},
+ .eh_frame => return if (dwarf.bin_file.cast(.elf)) |elf_file| {
+ const zo = elf_file.zigObjectPtr().?;
+ const entry_addr: i64 = @intCast(entry_off - sec.off + elf_file.shdrs.items[sec.index].sh_addr);
+ for (entry.external_relocs.items) |reloc| {
+ const symbol = zo.symbol(reloc.target_sym);
+ try dwarf.resolveReloc(
+ entry_off + reloc.source_off,
+ @bitCast((symbol.address(.{}, elf_file) + @as(i64, @intCast(reloc.target_off))) -
+ (entry_addr + reloc.source_off + 4)),
+ 4,
+ );
+ }
+ } else unreachable,
+ };
if (dwarf.bin_file.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
for (entry.external_relocs.items) |reloc| {
@@ -871,7 +1000,7 @@ const Entry = struct {
const CrossEntryReloc = struct {
source_off: u32 = 0,
- target_entry: Entry.Index,
+ target_entry: Entry.Index.Optional = .none,
target_off: u32 = 0,
};
const CrossUnitReloc = struct {
@@ -937,14 +1066,14 @@ pub const Loc = union(enum) {
}
}
- fn write(loc: Loc, wip: anytype) UpdateError!void {
- const writer = wip.infoWriter();
+ fn write(loc: Loc, adapter: anytype) UpdateError!void {
+ const writer = adapter.writer();
switch (loc) {
- .empty => unreachable,
+ .empty => {},
.addr => |addr| {
try writer.writeByte(DW.OP.addr);
switch (addr) {
- .sym => |sym_index| try wip.addrSym(sym_index),
+ .sym => |sym_index| try adapter.addrSym(sym_index),
}
},
.constu => |constu| if (std.math.cast(u5, constu)) |lit| {
@@ -953,45 +1082,45 @@ pub const Loc = union(enum) {
try writer.writeAll(&.{ DW.OP.const1u, const1u });
} else if (std.math.cast(u16, constu)) |const2u| {
try writer.writeByte(DW.OP.const2u);
- try writer.writeInt(u16, const2u, wip.dwarf.endian);
+ try writer.writeInt(u16, const2u, adapter.endian());
} else if (std.math.cast(u21, constu)) |const3u| {
try writer.writeByte(DW.OP.constu);
try uleb128(writer, const3u);
} else if (std.math.cast(u32, constu)) |const4u| {
try writer.writeByte(DW.OP.const4u);
- try writer.writeInt(u32, const4u, wip.dwarf.endian);
+ try writer.writeInt(u32, const4u, adapter.endian());
} else if (std.math.cast(u49, constu)) |const7u| {
try writer.writeByte(DW.OP.constu);
try uleb128(writer, const7u);
} else {
try writer.writeByte(DW.OP.const8u);
- try writer.writeInt(u64, constu, wip.dwarf.endian);
+ try writer.writeInt(u64, constu, adapter.endian());
},
.consts => |consts| if (std.math.cast(i8, consts)) |const1s| {
try writer.writeAll(&.{ DW.OP.const1s, @bitCast(const1s) });
} else if (std.math.cast(i16, consts)) |const2s| {
try writer.writeByte(DW.OP.const2s);
- try writer.writeInt(i16, const2s, wip.dwarf.endian);
+ try writer.writeInt(i16, const2s, adapter.endian());
} else if (std.math.cast(i21, consts)) |const3s| {
try writer.writeByte(DW.OP.consts);
try sleb128(writer, const3s);
} else if (std.math.cast(i32, consts)) |const4s| {
try writer.writeByte(DW.OP.const4s);
- try writer.writeInt(i32, const4s, wip.dwarf.endian);
+ try writer.writeInt(i32, const4s, adapter.endian());
} else if (std.math.cast(i49, consts)) |const7s| {
try writer.writeByte(DW.OP.consts);
try sleb128(writer, const7s);
} else {
try writer.writeByte(DW.OP.const8s);
- try writer.writeInt(i64, consts, wip.dwarf.endian);
+ try writer.writeInt(i64, consts, adapter.endian());
},
.plus => |plus| done: {
if (plus[0].getConst(u0)) |_| {
- try plus[1].write(wip);
+ try plus[1].write(adapter);
break :done;
}
if (plus[1].getConst(u0)) |_| {
- try plus[0].write(wip);
+ try plus[0].write(adapter);
break :done;
}
if (plus[0].getBaseReg()) |breg| {
@@ -1009,19 +1138,19 @@ pub const Loc = union(enum) {
}
}
if (plus[0].getConst(u64)) |uconst| {
- try plus[1].write(wip);
+ try plus[1].write(adapter);
try writer.writeByte(DW.OP.plus_uconst);
try uleb128(writer, uconst);
break :done;
}
if (plus[1].getConst(u64)) |uconst| {
- try plus[0].write(wip);
+ try plus[0].write(adapter);
try writer.writeByte(DW.OP.plus_uconst);
try uleb128(writer, uconst);
break :done;
}
- try plus[0].write(wip);
- try plus[1].write(wip);
+ try plus[0].write(adapter);
+ try plus[1].write(adapter);
try writer.writeByte(DW.OP.plus);
},
.reg => |reg| try writeReg(reg, DW.OP.reg0, DW.OP.regx, writer),
@@ -1031,7 +1160,7 @@ pub const Loc = union(enum) {
},
.push_object_address => try writer.writeByte(DW.OP.push_object_address),
.form_tls_address => |addr| {
- try addr.write(wip);
+ try addr.write(adapter);
try writer.writeByte(DW.OP.form_tls_address);
},
.implicit_value => |value| {
@@ -1040,7 +1169,7 @@ pub const Loc = union(enum) {
try writer.writeAll(value);
},
.stack_value => |value| {
- try value.write(wip);
+ try value.write(adapter);
try writer.writeByte(DW.OP.stack_value);
},
.wasm_ext => |wasm_ext| {
@@ -1055,7 +1184,7 @@ pub const Loc = union(enum) {
try uleb128(writer, global_u21);
} else {
try writer.writeByte(DW.OP.WASM_global_u32);
- try writer.writeInt(u32, global, wip.dwarf.endian);
+ try writer.writeInt(u32, global, adapter.endian());
},
.operand_stack => |operand_stack| {
try writer.writeByte(DW.OP.WASM_operand_stack);
@@ -1067,6 +1196,153 @@ pub const Loc = union(enum) {
}
};
+pub const Cfa = union(enum) {
+ nop,
+ advance_loc: u32,
+ offset: RegOff,
+ rel_offset: RegOff,
+ restore: u32,
+ undefined: u32,
+ same_value: u32,
+ register: [2]u32,
+ remember_state,
+ restore_state,
+ def_cfa: RegOff,
+ def_cfa_register: u32,
+ def_cfa_offset: i64,
+ adjust_cfa_offset: i64,
+ def_cfa_expression: Loc,
+ expression: RegExpr,
+ val_offset: RegOff,
+ val_expression: RegExpr,
+ escape: []const u8,
+
+ const RegOff = struct { reg: u32, off: i64 };
+ const RegExpr = struct { reg: u32, expr: Loc };
+
+ fn write(cfa: Cfa, wip_nav: *WipNav) UpdateError!void {
+ const writer = wip_nav.debug_frame.writer(wip_nav.dwarf.gpa);
+ switch (cfa) {
+ .nop => try writer.writeByte(DW.CFA.nop),
+ .advance_loc => |loc| {
+ const delta = @divExact(loc - wip_nav.cfi.loc, wip_nav.dwarf.debug_frame.header.code_alignment_factor);
+ if (delta == 0) {} else if (std.math.cast(u6, delta)) |small_delta|
+ try writer.writeByte(@as(u8, DW.CFA.advance_loc) + small_delta)
+ else if (std.math.cast(u8, delta)) |ubyte_delta|
+ try writer.writeAll(&.{ DW.CFA.advance_loc1, ubyte_delta })
+ else if (std.math.cast(u16, delta)) |uhalf_delta| {
+ try writer.writeByte(DW.CFA.advance_loc2);
+ try writer.writeInt(u16, uhalf_delta, wip_nav.dwarf.endian);
+ } else if (std.math.cast(u32, delta)) |uword_delta| {
+ try writer.writeByte(DW.CFA.advance_loc4);
+ try writer.writeInt(u32, uword_delta, wip_nav.dwarf.endian);
+ }
+ wip_nav.cfi.loc = loc;
+ },
+ .offset, .rel_offset => |reg_off| {
+ const factored_off = @divExact(reg_off.off - switch (cfa) {
+ else => unreachable,
+ .offset => 0,
+ .rel_offset => wip_nav.cfi.cfa.off,
+ }, wip_nav.dwarf.debug_frame.header.data_alignment_factor);
+ if (std.math.cast(u63, factored_off)) |unsigned_off| {
+ if (std.math.cast(u6, reg_off.reg)) |small_reg| {
+ try writer.writeByte(@as(u8, DW.CFA.offset) + small_reg);
+ } else {
+ try writer.writeByte(DW.CFA.offset_extended);
+ try uleb128(writer, reg_off.reg);
+ }
+ try uleb128(writer, unsigned_off);
+ } else {
+ try writer.writeByte(DW.CFA.offset_extended_sf);
+ try uleb128(writer, reg_off.reg);
+ try sleb128(writer, factored_off);
+ }
+ },
+ .restore => |reg| if (std.math.cast(u6, reg)) |small_reg|
+ try writer.writeByte(@as(u8, DW.CFA.restore) + small_reg)
+ else {
+ try writer.writeByte(DW.CFA.restore_extended);
+ try uleb128(writer, reg);
+ },
+ .undefined => |reg| {
+ try writer.writeByte(DW.CFA.undefined);
+ try uleb128(writer, reg);
+ },
+ .same_value => |reg| {
+ try writer.writeByte(DW.CFA.same_value);
+ try uleb128(writer, reg);
+ },
+ .register => |regs| if (regs[0] != regs[1]) {
+ try writer.writeByte(DW.CFA.register);
+ for (regs) |reg| try uleb128(writer, reg);
+ } else {
+ try writer.writeByte(DW.CFA.same_value);
+ try uleb128(writer, regs[0]);
+ },
+ .remember_state => try writer.writeByte(DW.CFA.remember_state),
+ .restore_state => try writer.writeByte(DW.CFA.restore_state),
+ .def_cfa, .def_cfa_register, .def_cfa_offset, .adjust_cfa_offset => {
+ const reg_off: RegOff = switch (cfa) {
+ else => unreachable,
+ .def_cfa => |reg_off| reg_off,
+ .def_cfa_register => |reg| .{ .reg = reg, .off = wip_nav.cfi.cfa.off },
+ .def_cfa_offset => |off| .{ .reg = wip_nav.cfi.cfa.reg, .off = off },
+ .adjust_cfa_offset => |off| .{ .reg = wip_nav.cfi.cfa.reg, .off = wip_nav.cfi.cfa.off + off },
+ };
+ const changed_reg = reg_off.reg != wip_nav.cfi.cfa.reg;
+ const unsigned_off = std.math.cast(u63, reg_off.off);
+ if (reg_off.off == wip_nav.cfi.cfa.off) {
+ if (changed_reg) {
+ try writer.writeByte(DW.CFA.def_cfa_register);
+ try uleb128(writer, reg_off.reg);
+ }
+ } else if (switch (wip_nav.dwarf.debug_frame.header.data_alignment_factor) {
+ 0 => unreachable,
+ 1 => unsigned_off != null,
+ else => |data_alignment_factor| @rem(reg_off.off, data_alignment_factor) != 0,
+ }) {
+ try writer.writeByte(if (changed_reg) DW.CFA.def_cfa else DW.CFA.def_cfa_offset);
+ if (changed_reg) try uleb128(writer, reg_off.reg);
+ try uleb128(writer, unsigned_off.?);
+ } else {
+ try writer.writeByte(if (changed_reg) DW.CFA.def_cfa_sf else DW.CFA.def_cfa_offset_sf);
+ if (changed_reg) try uleb128(writer, reg_off.reg);
+ try sleb128(writer, @divExact(reg_off.off, wip_nav.dwarf.debug_frame.header.data_alignment_factor));
+ }
+ wip_nav.cfi.cfa = reg_off;
+ },
+ .def_cfa_expression => |expr| {
+ try writer.writeByte(DW.CFA.def_cfa_expression);
+ try wip_nav.frameExprloc(expr);
+ },
+ .expression => |reg_expr| {
+ try writer.writeByte(DW.CFA.expression);
+ try uleb128(writer, reg_expr.reg);
+ try wip_nav.frameExprloc(reg_expr.expr);
+ },
+ .val_offset => |reg_off| {
+ const factored_off = @divExact(reg_off.off, wip_nav.dwarf.debug_frame.header.data_alignment_factor);
+ if (std.math.cast(u63, factored_off)) |unsigned_off| {
+ try writer.writeByte(DW.CFA.val_offset);
+ try uleb128(writer, reg_off.reg);
+ try uleb128(writer, unsigned_off);
+ } else {
+ try writer.writeByte(DW.CFA.val_offset_sf);
+ try uleb128(writer, reg_off.reg);
+ try sleb128(writer, factored_off);
+ }
+ },
+ .val_expression => |reg_expr| {
+ try writer.writeByte(DW.CFA.val_expression);
+ try uleb128(writer, reg_expr.reg);
+ try wip_nav.frameExprloc(reg_expr.expr);
+ },
+ .escape => |bytes| try writer.writeAll(bytes),
+ }
+ }
+};
+
pub const WipNav = struct {
dwarf: *Dwarf,
pt: Zcu.PerThread,
@@ -1080,6 +1356,11 @@ pub const WipNav = struct {
abbrev_code: u32,
high_reloc: u32,
}),
+ cfi: struct {
+ loc: u32,
+ cfa: Cfa.RegOff,
+ },
+ debug_frame: std.ArrayListUnmanaged(u8),
debug_info: std.ArrayListUnmanaged(u8),
debug_line: std.ArrayListUnmanaged(u8),
debug_loclists: std.ArrayListUnmanaged(u8),
@@ -1088,14 +1369,19 @@ pub const WipNav = struct {
pub fn deinit(wip_nav: *WipNav) void {
const gpa = wip_nav.dwarf.gpa;
if (wip_nav.func != .none) wip_nav.inlined_funcs.deinit(gpa);
+ wip_nav.debug_frame.deinit(gpa);
wip_nav.debug_info.deinit(gpa);
wip_nav.debug_line.deinit(gpa);
wip_nav.debug_loclists.deinit(gpa);
wip_nav.pending_types.deinit(gpa);
}
- pub fn infoWriter(wip_nav: *WipNav) std.ArrayListUnmanaged(u8).Writer {
- return wip_nav.debug_info.writer(wip_nav.dwarf.gpa);
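+ /// Appends an advance_loc to `loc` followed by the given CFA instruction to this function's frame data; does nothing when no frame format is configured.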
+ pub fn genDebugFrame(wip_nav: *WipNav, loc: u32, cfa: Cfa) UpdateError!void {
+ assert(wip_nav.func != .none);
+ if (wip_nav.dwarf.debug_frame.header.format == .none) return;
+ const loc_cfa: Cfa = .{ .advance_loc = loc };
+ try loc_cfa.write(wip_nav);
+ try cfa.write(wip_nav);
}
pub const LocalTag = enum { local_arg, local_var };
@@ -1301,7 +1587,7 @@ pub const WipNav = struct {
} else {
try entry_ptr.cross_entry_relocs.append(gpa, .{
.source_off = @intCast(wip_nav.debug_info.items.len),
- .target_entry = entry,
+ .target_entry = entry.toOptional(),
.target_off = off,
});
}
@@ -1312,7 +1598,45 @@ pub const WipNav = struct {
try wip_nav.infoSectionOffset(.debug_str, StringSection.unit, try wip_nav.dwarf.debug_str.addString(wip_nav.dwarf, str), 0);
}
- fn addrSym(wip_nav: *WipNav, sym_index: u32) UpdateError!void {
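+ /// Counting writer adapter used to measure the encoded size of a Loc so it can be emitted as a ULEB128 length prefix before the expression bytes.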
+ const ExprLocCounter = struct {
+ const Stream = std.io.CountingWriter(std.io.NullWriter);
+ stream: Stream,
+ address_size: AddressSize,
+ fn writer(counter: *ExprLocCounter) Stream.Writer {
+ return counter.stream.writer();
+ }
+ fn endian(_: ExprLocCounter) std.builtin.Endian {
+ return @import("builtin").cpu.arch.endian();
+ }
+ fn addrSym(counter: *ExprLocCounter, _: u32) error{}!void {
+ counter.stream.bytes_written += @intFromEnum(counter.address_size);
+ }
+ };
+
+ fn exprloc(wip_nav: *WipNav, loc: Loc) UpdateError!void {
+ var counter: ExprLocCounter = .{
+ .stream = std.io.countingWriter(std.io.null_writer),
+ .address_size = wip_nav.dwarf.address_size,
+ };
+ try loc.write(&counter);
+
+ const adapter: struct {
+ wip_nav: *WipNav,
+ fn writer(ctx: @This()) std.ArrayListUnmanaged(u8).Writer {
+ return ctx.wip_nav.debug_info.writer(ctx.wip_nav.dwarf.gpa);
+ }
+ fn endian(ctx: @This()) std.builtin.Endian {
+ return ctx.wip_nav.dwarf.endian;
+ }
+ fn addrSym(ctx: @This(), sym_index: u32) UpdateError!void {
+ try ctx.wip_nav.infoAddrSym(sym_index);
+ }
+ } = .{ .wip_nav = wip_nav };
+ try uleb128(adapter.writer(), counter.stream.bytes_written);
+ try loc.write(adapter);
+ }
+
+ fn infoAddrSym(wip_nav: *WipNav, sym_index: u32) UpdateError!void {
const dwarf = wip_nav.dwarf;
try dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.append(dwarf.gpa, .{
.source_off = @intCast(wip_nav.debug_info.items.len),
@@ -1321,25 +1645,36 @@ pub const WipNav = struct {
try wip_nav.debug_info.appendNTimes(dwarf.gpa, 0, @intFromEnum(dwarf.address_size));
}
- fn exprloc(wip_nav: *WipNav, loc: Loc) UpdateError!void {
- if (loc == .empty) return;
- var wip: struct {
- const Info = std.io.CountingWriter(std.io.NullWriter);
- dwarf: *Dwarf,
- debug_info: Info,
- fn infoWriter(wip: *@This()) Info.Writer {
- return wip.debug_info.writer();
+ fn frameExprloc(wip_nav: *WipNav, loc: Loc) UpdateError!void {
+ var counter: ExprLocCounter = .{
+ .stream = std.io.countingWriter(std.io.null_writer),
+ .address_size = wip_nav.dwarf.address_size,
+ };
+ try loc.write(&counter);
+
+ const adapter: struct {
+ wip_nav: *WipNav,
+ fn writer(ctx: @This()) std.ArrayListUnmanaged(u8).Writer {
+ return ctx.wip_nav.debug_frame.writer(ctx.wip_nav.dwarf.gpa);
}
- fn addrSym(wip: *@This(), _: u32) error{}!void {
- wip.debug_info.bytes_written += @intFromEnum(wip.dwarf.address_size);
+ fn endian(ctx: @This()) std.builtin.Endian {
+ return ctx.wip_nav.dwarf.endian;
}
- } = .{
- .dwarf = wip_nav.dwarf,
- .debug_info = std.io.countingWriter(std.io.null_writer),
- };
- try loc.write(&wip);
- try uleb128(wip_nav.debug_info.writer(wip_nav.dwarf.gpa), wip.debug_info.bytes_written);
- try loc.write(wip_nav);
+ fn addrSym(ctx: @This(), sym_index: u32) UpdateError!void {
+ try ctx.wip_nav.frameAddrSym(sym_index);
+ }
+ } = .{ .wip_nav = wip_nav };
+ try uleb128(adapter.writer(), counter.stream.bytes_written);
+ try loc.write(adapter);
+ }
+
+ fn frameAddrSym(wip_nav: *WipNav, sym_index: u32) UpdateError!void {
+ const dwarf = wip_nav.dwarf;
+ try dwarf.debug_frame.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.append(dwarf.gpa, .{
+ .source_off = @intCast(wip_nav.debug_frame.items.len),
+ .target_sym = sym_index,
+ });
+ try wip_nav.debug_frame.appendNTimes(dwarf.gpa, 0, @intFromEnum(dwarf.address_size));
}
fn getTypeEntry(wip_nav: *WipNav, ty: Type) UpdateError!struct { Unit.Index, Entry.Index } {
@@ -1387,7 +1722,7 @@ pub const WipNav = struct {
fn finishForward(wip_nav: *WipNav, reloc_index: u32) void {
const reloc = &wip_nav.dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).cross_entry_relocs.items[reloc_index];
- reloc.target_entry = wip_nav.entry;
+ reloc.target_entry = wip_nav.entry.toOptional();
reloc.target_off = @intCast(wip_nav.debug_info.items.len);
}
@@ -1490,6 +1825,28 @@ pub fn init(lf: *link.File, format: DW.Format) Dwarf {
.debug_abbrev = .{ .section = Section.init },
.debug_aranges = .{ .section = Section.init },
+ .debug_frame = .{
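+ // Unwind info is only produced for x86_64 ELF for now, as .eh_frame with the standard SysV entry state: CFA = rsp + 8, return address saved at CFA - 8.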
+ .header = if (target.cpu.arch == .x86_64 and target.ofmt == .elf) header: {
+ const Register = @import("../arch/x86_64/bits.zig").Register;
+ break :header comptime .{
+ .format = .eh_frame,
+ .code_alignment_factor = 1,
+ .data_alignment_factor = -8,
+ .return_address_register = Register.rip.dwarfNum(),
+ .initial_instructions = &.{
+ .{ .def_cfa = .{ .reg = Register.rsp.dwarfNum(), .off = 8 } },
+ .{ .offset = .{ .reg = Register.rip.dwarfNum(), .off = -8 } },
+ },
+ };
+ } else .{
+ .format = .none,
+ .code_alignment_factor = undefined,
+ .data_alignment_factor = undefined,
+ .return_address_register = undefined,
+ .initial_instructions = &.{},
+ },
+ .section = Section.init,
+ },
.debug_info = .{ .section = Section.init },
.debug_line = .{
.header = switch (target.cpu.arch) {
@@ -1524,6 +1881,7 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
for ([_]*Section{
&dwarf.debug_abbrev.section,
&dwarf.debug_aranges.section,
+ &dwarf.debug_frame.section,
&dwarf.debug_info.section,
&dwarf.debug_line.section,
&dwarf.debug_line_str.section,
@@ -1533,6 +1891,7 @@ pub fn reloadSectionMetadata(dwarf: *Dwarf) void {
}, [_]u32{
elf_file.debug_abbrev_section_index.?,
elf_file.debug_aranges_section_index.?,
+ elf_file.eh_frame_section_index.?,
elf_file.debug_info_section_index.?,
elf_file.debug_line_section_index.?,
elf_file.debug_line_str_section_index.?,
@@ -1612,6 +1971,12 @@ pub fn initMetadata(dwarf: *Dwarf) UpdateError!void {
dwarf.debug_aranges.section.pad_to_ideal = false;
dwarf.debug_aranges.section.alignment = InternPool.Alignment.fromNonzeroByteUnits(@intFromEnum(dwarf.address_size) * 2);
+ dwarf.debug_frame.section.alignment = switch (dwarf.debug_frame.header.format) {
+ .none => .@"1",
+ .debug_frame => InternPool.Alignment.fromNonzeroByteUnits(@intFromEnum(dwarf.address_size)),
+ .eh_frame => .@"4",
+ };
+
dwarf.debug_line_str.section.pad_to_ideal = false;
assert(try dwarf.debug_line_str.section.addUnit(0, 0, dwarf) == StringSection.unit);
errdefer dwarf.debug_line_str.section.popUnit(dwarf.gpa);
@@ -1633,6 +1998,7 @@ pub fn deinit(dwarf: *Dwarf) void {
dwarf.navs.deinit(gpa);
dwarf.debug_abbrev.section.deinit(gpa);
dwarf.debug_aranges.section.deinit(gpa);
+ dwarf.debug_frame.section.deinit(gpa);
dwarf.debug_info.section.deinit(gpa);
dwarf.debug_line.section.deinit(gpa);
dwarf.debug_line_str.deinit(gpa);
@@ -1660,6 +2026,12 @@ fn getUnit(dwarf: *Dwarf, mod: *Module) UpdateError!Unit.Index {
dwarf,
) == unit);
errdefer dwarf.debug_aranges.section.popUnit(dwarf.gpa);
+ assert(try dwarf.debug_frame.section.addUnit(
+ DebugFrame.headerBytes(dwarf),
+ DebugFrame.trailerBytes(dwarf),
+ dwarf,
+ ) == unit);
+ errdefer dwarf.debug_frame.section.popUnit(dwarf.gpa);
assert(try dwarf.debug_info.section.addUnit(
DebugInfo.headerBytes(dwarf),
DebugInfo.trailer_bytes,
@@ -1729,6 +2101,8 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In
.func_sym_index = undefined,
.func_high_reloc = undefined,
.inlined_funcs = undefined,
+ .cfi = undefined,
+ .debug_frame = .{},
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
@@ -1870,6 +2244,50 @@ pub fn initWipNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool.Nav.In
wip_nav.func = nav_val.toIntern();
wip_nav.func_sym_index = sym_index;
wip_nav.inlined_funcs = .{};
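+ // Seed the CFI tracking state from the CIE's initial CFA rule so subsequent directives can be encoded as deltas against it.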
+ if (dwarf.debug_frame.header.format != .none) wip_nav.cfi = .{
+ .loc = 0,
+ .cfa = dwarf.debug_frame.header.initial_instructions[0].def_cfa,
+ };
+
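+ // Begin this function's FDE: write a placeholder unit length and CIE reference, and record a relocation against the function symbol for the initial location; the unit length and address range are backpatched in finishWipNav once the function size is known.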
+ switch (dwarf.debug_frame.header.format) {
+ .none => {},
+ .debug_frame, .eh_frame => |format| {
+ const entry = dwarf.debug_frame.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry);
+ const dfw = wip_nav.debug_frame.writer(dwarf.gpa);
+ switch (dwarf.format) {
+ .@"32" => try dfw.writeInt(u32, undefined, dwarf.endian),
+ .@"64" => {
+ try dfw.writeInt(u32, std.math.maxInt(u32), dwarf.endian);
+ try dfw.writeInt(u64, undefined, dwarf.endian);
+ },
+ }
+ switch (format) {
+ .none => unreachable,
+ .debug_frame => {
+ try entry.cross_entry_relocs.append(dwarf.gpa, .{
+ .source_off = @intCast(wip_nav.debug_frame.items.len),
+ });
+ try dfw.writeByteNTimes(0, dwarf.sectionOffsetBytes());
+ try entry.external_relocs.append(dwarf.gpa, .{
+ .source_off = @intCast(wip_nav.debug_frame.items.len),
+ .target_sym = sym_index,
+ });
+ try dfw.writeByteNTimes(0, @intFromEnum(dwarf.address_size));
+ try dfw.writeByteNTimes(undefined, @intFromEnum(dwarf.address_size));
+ },
+ .eh_frame => {
+ try dfw.writeInt(u32, undefined, dwarf.endian);
+ try entry.external_relocs.append(dwarf.gpa, .{
+ .source_off = @intCast(wip_nav.debug_frame.items.len),
+ .target_sym = sym_index,
+ });
+ try dfw.writeByteNTimes(0, dwarf.sectionOffsetBytes());
+ try dfw.writeInt(u32, undefined, dwarf.endian);
+ try uleb128(dfw, 0);
+ },
+ }
+ },
+ }
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try wip_nav.abbrevCode(.decl_func);
@@ -1953,49 +2371,84 @@ pub fn finishWipNav(
log.debug("finishWipNav({})", .{nav.fqn.fmt(ip)});
if (wip_nav.func != .none) {
- const external_relocs = &dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs;
- external_relocs.items[wip_nav.func_high_reloc].target_off = sym.size;
- if (wip_nav.any_children) {
- const diw = wip_nav.debug_info.writer(dwarf.gpa);
- try uleb128(diw, @intFromEnum(AbbrevCode.null));
- } else std.leb.writeUnsignedFixed(
- AbbrevCode.decl_bytes,
- wip_nav.debug_info.items[0..AbbrevCode.decl_bytes],
- try dwarf.refAbbrevCode(.decl_empty_func),
- );
-
- var aranges_entry = [1]u8{0} ** (8 + 8);
- try dwarf.debug_aranges.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.append(dwarf.gpa, .{
- .target_sym = sym.index,
- });
- dwarf.writeInt(aranges_entry[0..@intFromEnum(dwarf.address_size)], 0);
- dwarf.writeInt(aranges_entry[@intFromEnum(dwarf.address_size)..][0..@intFromEnum(dwarf.address_size)], sym.size);
-
- @memset(aranges_entry[0..@intFromEnum(dwarf.address_size)], 0);
- try dwarf.debug_aranges.section.replaceEntry(
- wip_nav.unit,
- wip_nav.entry,
- dwarf,
- aranges_entry[0 .. @intFromEnum(dwarf.address_size) * 2],
- );
-
- try dwarf.debug_rnglists.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.appendSlice(dwarf.gpa, &.{
- .{
- .source_off = 1,
- .target_sym = sym.index,
- },
- .{
- .source_off = 1 + @intFromEnum(dwarf.address_size),
- .target_sym = sym.index,
- .target_off = sym.size,
+ {
+ const external_relocs = &dwarf.debug_aranges.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs;
+ try external_relocs.append(dwarf.gpa, .{ .target_sym = sym.index });
+ var entry: [8 + 8]u8 = undefined;
+ @memset(entry[0..@intFromEnum(dwarf.address_size)], 0);
+ dwarf.writeInt(entry[@intFromEnum(dwarf.address_size)..][0..@intFromEnum(dwarf.address_size)], sym.size);
+ try dwarf.debug_aranges.section.replaceEntry(
+ wip_nav.unit,
+ wip_nav.entry,
+ dwarf,
+ entry[0 .. @intFromEnum(dwarf.address_size) * 2],
+ );
+ }
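+ // Finalize the FDE: pad with DW_CFA_nop to the section alignment, then backpatch the unit length and the function's address range (and, for .eh_frame, the self-relative CIE pointer) now that the size is known.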
+ switch (dwarf.debug_frame.header.format) {
+ .none => {},
+ .debug_frame, .eh_frame => |format| {
+ try wip_nav.debug_frame.appendNTimes(
+ dwarf.gpa,
+ DW.CFA.nop,
+ @intCast(dwarf.debug_frame.section.alignment.forward(wip_nav.debug_frame.items.len) - wip_nav.debug_frame.items.len),
+ );
+ const contents = wip_nav.debug_frame.items;
+ try dwarf.debug_frame.section.resizeEntry(wip_nav.unit, wip_nav.entry, dwarf, @intCast(contents.len));
+ const unit = dwarf.debug_frame.section.getUnit(wip_nav.unit);
+ const entry = unit.getEntry(wip_nav.entry);
+ const unit_len = (if (entry.next.unwrap()) |next_entry|
+ unit.getEntry(next_entry).off - entry.off
+ else
+ entry.len) - dwarf.unitLengthBytes();
+ dwarf.writeInt(contents[dwarf.unitLengthBytes() - dwarf.sectionOffsetBytes() ..][0..dwarf.sectionOffsetBytes()], unit_len);
+ switch (format) {
+ .none => unreachable,
+ .debug_frame => dwarf.writeInt(contents[dwarf.unitLengthBytes() + dwarf.sectionOffsetBytes() +
+ @intFromEnum(dwarf.address_size) ..][0..@intFromEnum(dwarf.address_size)], sym.size),
+ .eh_frame => {
+ std.mem.writeInt(
+ u32,
+ contents[dwarf.unitLengthBytes()..][0..4],
+ unit.header_len + entry.off + dwarf.unitLengthBytes(),
+ dwarf.endian,
+ );
+ std.mem.writeInt(u32, contents[dwarf.unitLengthBytes() + 4 + 4 ..][0..4], @intCast(sym.size), dwarf.endian);
+ },
+ }
+ try entry.replace(unit, &dwarf.debug_frame.section, dwarf, contents);
},
- });
- try dwarf.debug_rnglists.section.replaceEntry(
- wip_nav.unit,
- wip_nav.entry,
- dwarf,
- ([1]u8{DW.RLE.start_end} ++ [1]u8{0} ** (8 + 8))[0 .. 1 + @intFromEnum(dwarf.address_size) + @intFromEnum(dwarf.address_size)],
- );
+ }
+ {
+ const external_relocs = &dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs;
+ external_relocs.items[wip_nav.func_high_reloc].target_off = sym.size;
+ if (wip_nav.any_children) {
+ const diw = wip_nav.debug_info.writer(dwarf.gpa);
+ try uleb128(diw, @intFromEnum(AbbrevCode.null));
+ } else std.leb.writeUnsignedFixed(
+ AbbrevCode.decl_bytes,
+ wip_nav.debug_info.items[0..AbbrevCode.decl_bytes],
+ try dwarf.refAbbrevCode(.decl_empty_func),
+ );
+ }
+ {
+ try dwarf.debug_rnglists.section.getUnit(wip_nav.unit).getEntry(wip_nav.entry).external_relocs.appendSlice(dwarf.gpa, &.{
+ .{
+ .source_off = 1,
+ .target_sym = sym.index,
+ },
+ .{
+ .source_off = 1 + @intFromEnum(dwarf.address_size),
+ .target_sym = sym.index,
+ .target_off = sym.size,
+ },
+ });
+ try dwarf.debug_rnglists.section.replaceEntry(
+ wip_nav.unit,
+ wip_nav.entry,
+ dwarf,
+ ([1]u8{DW.RLE.start_end} ++ [1]u8{0} ** (8 + 8))[0 .. 1 + @intFromEnum(dwarf.address_size) + @intFromEnum(dwarf.address_size)],
+ );
+ }
}
try dwarf.debug_info.section.replaceEntry(wip_nav.unit, wip_nav.entry, dwarf, wip_nav.debug_info.items);
@@ -2038,6 +2491,8 @@ pub fn updateComptimeNav(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPool
.func_sym_index = undefined,
.func_high_reloc = undefined,
.inlined_funcs = undefined,
+ .cfi = undefined,
+ .debug_frame = .{},
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
@@ -2546,6 +3001,8 @@ fn updateType(
.func_sym_index = undefined,
.func_high_reloc = undefined,
.inlined_funcs = undefined,
+ .cfi = undefined,
+ .debug_frame = .{},
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
@@ -2999,6 +3456,8 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
.func_sym_index = undefined,
.func_high_reloc = undefined,
.inlined_funcs = undefined,
+ .cfi = undefined,
+ .debug_frame = .{},
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
@@ -3062,6 +3521,8 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
.func_sym_index = undefined,
.func_high_reloc = undefined,
.inlined_funcs = undefined,
+ .cfi = undefined,
+ .debug_frame = .{},
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
@@ -3255,6 +3716,8 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
.func_sym_index = undefined,
.func_high_reloc = undefined,
.inlined_funcs = undefined,
+ .cfi = undefined,
+ .debug_frame = .{},
.debug_info = .{},
.debug_line = .{},
.debug_loclists = .{},
@@ -3306,13 +3769,13 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
else
dwarf.debug_aranges.section.len) - unit_ptr.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
- .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
+ .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), @intCast(unit_len), dwarf.endian),
.@"64" => {
- std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
- std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
+ std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian);
+ std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(8), unit_len, dwarf.endian);
},
}
- std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 2, dwarf.endian);
+ std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(2), 2, dwarf.endian);
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
.target_sec = .debug_info,
@@ -3326,6 +3789,49 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
}
dwarf.debug_aranges.section.dirty = false;
}
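+ // Emit the .eh_frame CIE for each unit: augmentation "zR" with FDE pointers encoded as DW_EH_PE_pcrel | DW_EH_PE_sdata4, and initial rules CFA = rsp + 8 with the return address at CFA - 8.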
+ if (dwarf.debug_frame.section.dirty) {
+ const target = dwarf.bin_file.comp.root_mod.resolved_target.result;
+ switch (dwarf.debug_frame.header.format) {
+ .none => {},
+ .debug_frame => unreachable,
+ .eh_frame => switch (target.cpu.arch) {
+ .x86_64 => {
+ dev.check(.x86_64_backend);
+ const Register = @import("../arch/x86_64/bits.zig").Register;
+ for (dwarf.debug_frame.section.units.items) |*unit| {
+ header.clearRetainingCapacity();
+ try header.ensureTotalCapacity(unit.header_len);
+ const unit_len = unit.header_len - dwarf.unitLengthBytes();
+ switch (dwarf.format) {
+ .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), @intCast(unit_len), dwarf.endian),
+ .@"64" => {
+ std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian);
+ std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(8), unit_len, dwarf.endian);
+ },
+ }
+ header.appendNTimesAssumeCapacity(0, 4);
+ header.appendAssumeCapacity(1);
+ header.appendSliceAssumeCapacity("zR\x00");
+ uleb128(header.fixedWriter(), dwarf.debug_frame.header.code_alignment_factor) catch unreachable;
+ sleb128(header.fixedWriter(), dwarf.debug_frame.header.data_alignment_factor) catch unreachable;
+ uleb128(header.fixedWriter(), dwarf.debug_frame.header.return_address_register) catch unreachable;
+ uleb128(header.fixedWriter(), 1) catch unreachable;
+ header.appendAssumeCapacity(0x10 | 0x08 | 0x03);
+ header.appendAssumeCapacity(DW.CFA.def_cfa_sf);
+ uleb128(header.fixedWriter(), Register.rsp.dwarfNum()) catch unreachable;
+ sleb128(header.fixedWriter(), -1) catch unreachable;
+ header.appendAssumeCapacity(@as(u8, DW.CFA.offset) + Register.rip.dwarfNum());
+ uleb128(header.fixedWriter(), 1) catch unreachable;
+ header.appendNTimesAssumeCapacity(DW.CFA.nop, unit.header_len - header.items.len);
+ try unit.replaceHeader(&dwarf.debug_frame.section, dwarf, header.items);
+ try unit.writeTrailer(&dwarf.debug_frame.section, dwarf);
+ }
+ },
+ else => unreachable,
+ },
+ }
+ dwarf.debug_frame.section.dirty = false;
+ }
if (dwarf.debug_info.section.dirty) {
for (dwarf.mods.keys(), dwarf.mods.values(), dwarf.debug_info.section.units.items, 0..) |mod, mod_info, *unit_ptr, unit_index| {
const unit: Unit.Index = @enumFromInt(unit_index);
@@ -3339,13 +3845,13 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
else
dwarf.debug_info.section.len) - unit_ptr.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
- .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
+ .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), @intCast(unit_len), dwarf.endian),
.@"64" => {
- std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
- std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
+ std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian);
+ std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(8), unit_len, dwarf.endian);
},
}
- std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 5, dwarf.endian);
+ std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(2), 5, dwarf.endian);
header.appendSliceAssumeCapacity(&.{ DW.UT.compile, @intFromEnum(dwarf.address_size) });
unit_ptr.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
@@ -3438,13 +3944,13 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
else
dwarf.debug_line.section.len) - unit.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
- .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
+ .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), @intCast(unit_len), dwarf.endian),
.@"64" => {
- std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
- std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
+ std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian);
+ std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(8), unit_len, dwarf.endian);
},
}
- std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 5, dwarf.endian);
+ std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(2), 5, dwarf.endian);
header.appendSliceAssumeCapacity(&.{ @intFromEnum(dwarf.address_size), 0 });
dwarf.writeInt(header.addManyAsSliceAssumeCapacity(dwarf.sectionOffsetBytes()), unit.header_len - header.items.len);
const StandardOpcode = DeclValEnum(DW.LNS);
@@ -3540,15 +4046,15 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
else
dwarf.debug_rnglists.section.len) - unit.off - dwarf.unitLengthBytes();
switch (dwarf.format) {
- .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), @intCast(unit_len), dwarf.endian),
+ .@"32" => std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), @intCast(unit_len), dwarf.endian),
.@"64" => {
- std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), std.math.maxInt(u32), dwarf.endian);
- std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(@sizeOf(u64)), unit_len, dwarf.endian);
+ std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), std.math.maxInt(u32), dwarf.endian);
+ std.mem.writeInt(u64, header.addManyAsArrayAssumeCapacity(8), unit_len, dwarf.endian);
},
}
- std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(@sizeOf(u16)), 5, dwarf.endian);
+ std.mem.writeInt(u16, header.addManyAsArrayAssumeCapacity(2), 5, dwarf.endian);
header.appendSliceAssumeCapacity(&.{ @intFromEnum(dwarf.address_size), 0 });
- std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(@sizeOf(u32)), 1, dwarf.endian);
+ std.mem.writeInt(u32, header.addManyAsArrayAssumeCapacity(4), 1, dwarf.endian);
dwarf.writeInt(header.addManyAsSliceAssumeCapacity(dwarf.sectionOffsetBytes()), dwarf.sectionOffsetBytes() * 1);
try unit.replaceHeader(&dwarf.debug_rnglists.section, dwarf, header.items);
try unit.writeTrailer(&dwarf.debug_rnglists.section, dwarf);
@@ -3557,6 +4063,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
}
assert(!dwarf.debug_abbrev.section.dirty);
assert(!dwarf.debug_aranges.section.dirty);
+ assert(!dwarf.debug_frame.section.dirty);
assert(!dwarf.debug_info.section.dirty);
assert(!dwarf.debug_line.section.dirty);
assert(!dwarf.debug_line_str.section.dirty);
@@ -3569,6 +4076,7 @@ pub fn resolveRelocs(dwarf: *Dwarf) RelocError!void {
for ([_]*Section{
&dwarf.debug_abbrev.section,
&dwarf.debug_aranges.section,
+ &dwarf.debug_frame.section,
&dwarf.debug_info.section,
&dwarf.debug_line.section,
&dwarf.debug_line_str.section,
@@ -4147,6 +4655,7 @@ fn getFile(dwarf: *Dwarf) ?std.fs.File {
fn addCommonEntry(dwarf: *Dwarf, unit: Unit.Index) UpdateError!Entry.Index {
const entry = try dwarf.debug_aranges.section.getUnit(unit).addEntry(dwarf.gpa);
+ assert(try dwarf.debug_frame.section.getUnit(unit).addEntry(dwarf.gpa) == entry);
assert(try dwarf.debug_info.section.getUnit(unit).addEntry(dwarf.gpa) == entry);
assert(try dwarf.debug_line.section.getUnit(unit).addEntry(dwarf.gpa) == entry);
assert(try dwarf.debug_loclists.section.getUnit(unit).addEntry(dwarf.gpa) == entry);
@@ -4190,6 +4699,12 @@ fn uleb128Bytes(value: anytype) u32 {
return @intCast(cw.bytes_written);
}
+fn sleb128Bytes(value: anytype) u32 {
+ var cw = std.io.countingWriter(std.io.null_writer);
+ sleb128(cw.writer(), value) catch unreachable; // the counting writer's error set is empty, so this cannot fail
+ return @intCast(cw.bytes_written);
+}
+
/// overrides `-fno-incremental` for testing incremental debug info until `-fincremental` is functional
const force_incremental = false;
inline fn incremental(dwarf: Dwarf) bool {
@@ -4206,6 +4721,7 @@ const Zcu = @import("../Zcu.zig");
const Zir = std.zig.Zir;
const assert = std.debug.assert;
const codegen = @import("../codegen.zig");
+const dev = @import("../dev.zig");
const link = @import("../link.zig");
const log = std.log.scoped(.dwarf);
const sleb128 = std.leb.writeIleb128;
src/link/Elf.zig
@@ -569,9 +569,7 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
if (shdr.sh_type != elf.SHT_NOBITS) {
const allocated_size = self.allocatedSize(shdr.sh_offset);
- if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
- } else if (needed_size > allocated_size) {
+ if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Must move the entire section.
@@ -590,6 +588,8 @@ pub fn growAllocSection(self: *Elf, shdr_index: u32, needed_size: u64) !void {
shdr.sh_offset = new_offset;
if (maybe_phdr) |phdr| phdr.p_offset = new_offset;
+ } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
+ try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
}
if (maybe_phdr) |phdr| phdr.p_filesz = needed_size;
}
@@ -621,9 +621,7 @@ pub fn growNonAllocSection(
assert(shdr.sh_flags & elf.SHF_ALLOC == 0);
const allocated_size = self.allocatedSize(shdr.sh_offset);
- if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
- } else if (needed_size > allocated_size) {
+ if (needed_size > allocated_size) {
const existing_size = shdr.sh_size;
shdr.sh_size = 0;
// Move all the symbols to a new file location.
@@ -646,6 +644,8 @@ pub fn growNonAllocSection(
}
shdr.sh_offset = new_offset;
+ } else if (shdr.sh_offset + allocated_size == std.math.maxInt(u64)) {
+ try self.base.file.?.setEndPos(shdr.sh_offset + needed_size);
}
shdr.sh_size = needed_size;
@@ -699,7 +699,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
- const target = comp.root_mod.resolved_target.result;
+ const target = self.getTarget();
const link_mode = comp.config.link_mode;
const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
@@ -1053,7 +1053,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const target = self.base.comp.root_mod.resolved_target.result;
+ const target = self.getTarget();
const link_mode = self.base.comp.config.link_mode;
const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
@@ -1498,15 +1498,13 @@ fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void {
}
pub fn validateEFlags(self: *Elf, file_index: File.Index, e_flags: elf.Elf64_Word) !void {
- const target = self.base.comp.root_mod.resolved_target.result;
-
if (self.first_eflags == null) {
self.first_eflags = e_flags;
return; // there isn't anything to conflict with yet
}
const self_eflags: *elf.Elf64_Word = &self.first_eflags.?;
- switch (target.cpu.arch) {
+ switch (self.getTarget().cpu.arch) {
.riscv64 => {
if (e_flags != self_eflags.*) {
const riscv_eflags: riscv.RiscvEflags = @bitCast(e_flags);
@@ -1549,7 +1547,7 @@ fn accessLibPath(
link_mode: ?std.builtin.LinkMode,
) !bool {
const sep = fs.path.sep_str;
- const target = self.base.comp.root_mod.resolved_target.result;
+ const target = self.getTarget();
test_path.clearRetainingCapacity();
const prefix = if (link_mode != null) "lib" else "";
const suffix = if (link_mode) |mode| switch (mode) {
@@ -1779,7 +1777,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
const have_dynamic_linker = comp.config.link_libc and
link_mode == .dynamic and is_exe_or_dyn_lib;
- const target = comp.root_mod.resolved_target.result;
+ const target = self.getTarget();
const compiler_rt_path: ?[]const u8 = blk: {
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
if (comp.compiler_rt_obj) |x| break :blk x.full_object_path;
@@ -2353,8 +2351,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: s
pub fn writeShdrTable(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- const target = self.base.comp.root_mod.resolved_target.result;
- const target_endian = target.cpu.arch.endian();
+ const target_endian = self.getTarget().cpu.arch.endian();
const foreign_endian = target_endian != builtin.cpu.arch.endian();
const shsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Shdr),
@@ -2410,8 +2407,7 @@ pub fn writeShdrTable(self: *Elf) !void {
fn writePhdrTable(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- const target = self.base.comp.root_mod.resolved_target.result;
- const target_endian = target.cpu.arch.endian();
+ const target_endian = self.getTarget().cpu.arch.endian();
const foreign_endian = target_endian != builtin.cpu.arch.endian();
const phdr_table = &self.phdrs.items[self.phdr_table_index.?];
@@ -2464,7 +2460,7 @@ pub fn writeElfHeader(self: *Elf) !void {
};
index += 1;
- const target = comp.root_mod.resolved_target.result;
+ const target = self.getTarget();
const endian = target.cpu.arch.endian();
hdr_buf[index] = switch (endian) {
.little => elf.ELFDATA2LSB,
@@ -2772,21 +2768,25 @@ fn initOutputSections(self: *Elf) !void {
fn initSyntheticSections(self: *Elf) !void {
const comp = self.base.comp;
- const target = comp.root_mod.resolved_target.result;
+ const target = self.getTarget();
const ptr_size = self.ptrWidthBytes();
const needs_eh_frame = for (self.objects.items) |index| {
if (self.file(index).?.object.cies.items.len > 0) break true;
} else false;
if (needs_eh_frame) {
- self.eh_frame_section_index = try self.addSection(.{
- .name = try self.insertShString(".eh_frame"),
- .type = elf.SHT_PROGBITS,
- .flags = elf.SHF_ALLOC,
- .addralign = ptr_size,
- .offset = std.math.maxInt(u64),
- });
-
+ if (self.eh_frame_section_index == null) {
+ self.eh_frame_section_index = try self.addSection(.{
+ .name = try self.insertShString(".eh_frame"),
+ .type = if (target.cpu.arch == .x86_64)
+ elf.SHT_X86_64_UNWIND
+ else
+ elf.SHT_PROGBITS,
+ .flags = elf.SHF_ALLOC,
+ .addralign = ptr_size,
+ .offset = std.math.maxInt(u64),
+ });
+ }
if (comp.link_eh_frame_hdr) {
self.eh_frame_hdr_section_index = try self.addSection(.{
.name = try self.insertShString(".eh_frame_hdr"),
@@ -3446,7 +3446,6 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
}
fn updateSectionSizes(self: *Elf) !void {
- const target = self.base.comp.root_mod.resolved_target.result;
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list)) |*shdr, atom_list| {
if (atom_list.items.len == 0) continue;
@@ -3474,7 +3473,11 @@ fn updateSectionSizes(self: *Elf) !void {
const shdrs = slice.items(.shdr);
if (self.eh_frame_section_index) |index| {
- shdrs[index].sh_size = try eh_frame.calcEhFrameSize(self);
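+ // Keep room for the ZigObject's incrementally emitted .eh_frame atom in addition to the size computed by calcEhFrameSize.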
+ shdrs[index].sh_size = existing_size: {
+ const zo = self.zigObjectPtr() orelse break :existing_size 0;
+ const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
+ break :existing_size sym.atom(self).?.size;
+ } + try eh_frame.calcEhFrameSize(self);
}
if (self.eh_frame_hdr_section_index) |index| {
@@ -3517,7 +3520,7 @@ fn updateSectionSizes(self: *Elf) !void {
}
if (self.interp_section_index) |index| {
- shdrs[index].sh_size = target.dynamic_linker.get().?.len + 1;
+ shdrs[index].sh_size = self.getTarget().dynamic_linker.get().?.len + 1;
}
if (self.hash_section_index) |index| {
@@ -3759,10 +3762,10 @@ pub fn allocateAllocSections(self: *Elf) !void {
}
const first = slice.items(.shdr)[cover.items[0]];
- var off = try self.findFreeSpace(filesz, @"align");
+ var new_offset = try self.findFreeSpace(filesz, @"align");
const phndx = try self.addPhdr(.{
.type = elf.PT_LOAD,
- .offset = off,
+ .offset = new_offset,
.addr = first.sh_addr,
.memsz = memsz,
.filesz = filesz,
@@ -3777,9 +3780,28 @@ pub fn allocateAllocSections(self: *Elf) !void {
shdr.sh_offset = 0;
continue;
}
- off = alignment.@"align"(shndx, shdr.sh_addralign, off);
- shdr.sh_offset = off;
- off += shdr.sh_size;
+ new_offset = alignment.@"align"(shndx, shdr.sh_addralign, new_offset);
+
+ if (shndx == self.eh_frame_section_index) eh_frame: {
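+ // .eh_frame already contains incrementally written ZigObject data; copy it to the section's new file offset before the offset is updated.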
+ const zo = self.zigObjectPtr() orelse break :eh_frame;
+ const sym = zo.symbol(zo.eh_frame_index orelse break :eh_frame);
+ const existing_size = sym.atom(self).?.size;
+ log.debug("moving {s} from 0x{x} to 0x{x}", .{
+ self.getShString(shdr.sh_name),
+ shdr.sh_offset,
+ new_offset,
+ });
+ const amt = try self.base.file.?.copyRangeAll(
+ shdr.sh_offset,
+ self.base.file.?,
+ new_offset,
+ existing_size,
+ );
+ if (amt != existing_size) return error.InputOutput;
+ }
+
+ shdr.sh_offset = new_offset;
+ new_offset += shdr.sh_size;
}
addr = mem.alignForward(u64, addr, self.page_size);
@@ -3910,9 +3932,9 @@ fn writeAtoms(self: *Elf) !void {
log.debug("writing atoms in '{s}' section", .{self.getShString(shdr.sh_name)});
// TODO really, really handle debug section separately
- const base_offset = if (self.isDebugSection(@intCast(shndx))) blk: {
+ const base_offset = if (self.isDebugSection(@intCast(shndx))) base_offset: {
const zo = self.zigObjectPtr().?;
- break :blk for ([_]Symbol.Index{
+ for ([_]Symbol.Index{
zo.debug_info_index.?,
zo.debug_abbrev_index.?,
zo.debug_aranges_index.?,
@@ -3924,8 +3946,13 @@ fn writeAtoms(self: *Elf) !void {
}) |sym_index| {
const sym = zo.symbol(sym_index);
const atom_ptr = sym.atom(self).?;
- if (atom_ptr.output_section_index == shndx) break atom_ptr.size;
- } else 0;
+ if (atom_ptr.output_section_index == shndx) break :base_offset atom_ptr.size;
+ }
+ break :base_offset 0;
+ } else if (@as(u32, @intCast(shndx)) == self.eh_frame_section_index) base_offset: {
+ const zo = self.zigObjectPtr() orelse break :base_offset 0;
+ const sym = zo.symbol(zo.eh_frame_index orelse break :base_offset 0);
+ break :base_offset sym.atom(self).?.size;
} else 0;
const sh_offset = shdr.sh_offset + base_offset;
const sh_size = math.cast(usize, shdr.sh_size - base_offset) orelse return error.Overflow;
@@ -4082,12 +4109,11 @@ pub fn updateSymtabSize(self: *Elf) !void {
fn writeSyntheticSections(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- const target = self.getTarget();
const slice = self.sections.slice();
if (self.interp_section_index) |shndx| {
var buffer: [256]u8 = undefined;
- const interp = target.dynamic_linker.get().?;
+ const interp = self.getTarget().dynamic_linker.get().?;
@memcpy(buffer[0..interp.len], interp);
buffer[interp.len] = 0;
const contents = buffer[0 .. interp.len + 1];
@@ -4144,12 +4170,18 @@ fn writeSyntheticSections(self: *Elf) !void {
}
if (self.eh_frame_section_index) |shndx| {
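+ // The ZigObject's incrementally written frame data occupies the start of .eh_frame; write the synthesized frame data after it.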
+ const existing_size = existing_size: {
+ const zo = self.zigObjectPtr() orelse break :existing_size 0;
+ const sym = zo.symbol(zo.eh_frame_index orelse break :existing_size 0);
+ break :existing_size sym.atom(self).?.size;
+ };
const shdr = slice.items(.shdr)[shndx];
const sh_size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
- var buffer = try std.ArrayList(u8).initCapacity(gpa, sh_size);
+ var buffer = try std.ArrayList(u8).initCapacity(gpa, @intCast(sh_size - existing_size));
defer buffer.deinit();
try eh_frame.writeEhFrame(self, buffer.writer());
- try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+ assert(buffer.items.len == sh_size - existing_size);
+ try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset + existing_size);
}
if (self.eh_frame_hdr_section_index) |shndx| {
@@ -4222,7 +4254,6 @@ pub fn writeShStrtab(self: *Elf) !void {
pub fn writeSymtab(self: *Elf) !void {
const gpa = self.base.comp.gpa;
- const target = self.getTarget();
const slice = self.sections.slice();
const symtab_shdr = slice.items(.shdr)[self.symtab_section_index.?];
const strtab_shdr = slice.items(.shdr)[self.strtab_section_index.?];
@@ -4292,7 +4323,7 @@ pub fn writeSymtab(self: *Elf) !void {
self.plt_got.writeSymtab(self);
}
- const foreign_endian = target.cpu.arch.endian() != builtin.cpu.arch.endian();
+ const foreign_endian = self.getTarget().cpu.arch.endian() != builtin.cpu.arch.endian();
switch (self.ptr_width) {
.p32 => {
const buf = try gpa.alloc(elf.Elf32_Sym, self.symtab.items.len);
@@ -4630,10 +4661,8 @@ pub fn isZigSection(self: Elf, shndx: u32) bool {
self.zig_data_rel_ro_section_index,
self.zig_data_section_index,
self.zig_bss_section_index,
- }) |maybe_index| {
- if (maybe_index) |index| {
- if (index == shndx) return true;
- }
+ }) |index| {
+ if (index == shndx) return true;
}
return false;
}
@@ -4648,10 +4677,8 @@ pub fn isDebugSection(self: Elf, shndx: u32) bool {
self.debug_line_str_section_index,
self.debug_loclists_section_index,
self.debug_rnglists_section_index,
- }) |maybe_index| {
- if (maybe_index) |index| {
- if (index == shndx) return true;
- }
+ }) |index| {
+ if (index == shndx) return true;
}
return false;
}
src/link/MachO.zig
@@ -3411,9 +3411,7 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
- if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
- } else if (needed_size > allocated_size) {
+ if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
@@ -3431,6 +3429,8 @@ fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !vo
try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);
sect.offset = @intCast(new_offset);
+ } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
+ try self.base.file.?.setEndPos(sect.offset + needed_size);
}
seg.filesize = needed_size;
}
@@ -3456,9 +3456,7 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
if (!sect.isZerofill()) {
const allocated_size = self.allocatedSize(sect.offset);
- if (sect.offset + allocated_size == std.math.maxInt(u64)) {
- try self.base.file.?.setEndPos(sect.offset + needed_size);
- } else if (needed_size > allocated_size) {
+ if (needed_size > allocated_size) {
const existing_size = sect.size;
sect.size = 0;
@@ -3480,6 +3478,8 @@ fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void
sect.offset = @intCast(new_offset);
sect.addr = new_addr;
+ } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
+ try self.base.file.?.setEndPos(sect.offset + needed_size);
}
}
sect.size = needed_size;