//! Handling of `.eh_frame` (CIE/FDE records) and `.eh_frame_hdr` for the ELF linker.
pub const Fde = struct {
    /// Offset into the owning object's `.eh_frame` data.
    /// Includes the 4-byte size cell (i.e. points at the length field).
    offset: usize,
    /// Record size, excluding the leading 4-byte size cell (see `calcSize`).
    size: usize,
    /// Index into the owning object's `cies` list of the CIE this FDE refers to.
    cie_index: u32,
    rel_index: u32 = 0,
    rel_num: u32 = 0,
    input_section_index: u32 = 0,
    file_index: u32 = 0,
    alive: bool = true,
    /// Offset in the output `.eh_frame` section.
    /// Includes the 4-byte size cell.
    out_offset: u64 = 0,

    /// Virtual address of this record in the output file
    /// (section base plus `out_offset`).
    pub fn address(fde: Fde, elf_file: *Elf) u64 {
        const base: u64 = if (elf_file.section_indexes.eh_frame) |shndx|
            elf_file.sections.items(.shdr)[shndx].sh_addr
        else
            0;
        return base + fde.out_offset;
    }

    /// Raw bytes of the record (including the size cell) in the object's input data.
    pub fn data(fde: Fde, object: *Object) []u8 {
        return object.eh_frame_data.items[fde.offset..][0..fde.calcSize()];
    }

    pub fn cie(fde: Fde, object: *Object) Cie {
        return object.cies.items[fde.cie_index];
    }

    /// The FDE's CIE-pointer field (the second 4-byte cell of the record).
    pub fn ciePointer(fde: Fde, object: *Object) u32 {
        const fde_data = fde.data(object);
        return std.mem.readInt(u32, fde_data[4..8], .little);
    }

    /// Total on-disk size, including the leading 4-byte size cell.
    pub fn calcSize(fde: Fde) usize {
        return fde.size + 4;
    }

    /// The atom whose code this FDE describes, resolved through the FDE's
    /// first relocation (the PC-begin field).
    pub fn atom(fde: Fde, object: *Object) *Atom {
        const rel = fde.relocs(object)[0];
        const sym = object.symtab.items[rel.r_sym()];
        const atom_index = object.atoms_indexes.items[sym.st_shndx];
        return object.atom(atom_index).?;
    }

    pub fn relocs(fde: Fde, object: *Object) []const elf.Elf64_Rela {
        return object.relocs.items[fde.rel_index..][0..fde.rel_num];
    }

    pub fn fmt(fde: Fde, elf_file: *Elf) std.fmt.Alt(Format, Format.default) {
        return .{ .data = .{
            .fde = fde,
            .elf_file = elf_file,
        } };
    }

    const Format = struct {
        fde: Fde,
        elf_file: *Elf,

        fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
            const fde = f.fde;
            const elf_file = f.elf_file;
            const object = elf_file.file(fde.file_index).?.object;
            const atom_name = fde.atom(object).name(elf_file);
            // Note: `address` already includes `out_offset`, so it must not be
            // added again here (the previous code double-counted it).
            try writer.print("@{x} : size({x}) : cie({d}) : {s}", .{
                fde.address(elf_file),
                fde.calcSize(),
                fde.cie_index,
                atom_name,
            });
            if (!fde.alive) try writer.writeAll(" : [*]");
        }
    };
};
77
pub const Cie = struct {
    /// Offset into the owning object's `.eh_frame` data.
    /// Includes the 4-byte size cell (i.e. points at the length field).
    offset: usize,
    /// Record size, excluding the leading 4-byte size cell (see `calcSize`).
    size: usize,
    rel_index: u32 = 0,
    rel_num: u32 = 0,
    input_section_index: u32 = 0,
    file_index: u32 = 0,
    /// Offset in the output `.eh_frame` section.
    /// Includes the 4-byte size cell.
    out_offset: u64 = 0,
    /// CIEs start out dead; `calcEhFrameSize` marks one representative of each
    /// distinct CIE alive and points duplicates at its `out_offset`.
    alive: bool = false,

    /// Virtual address of this record in the output file
    /// (section base plus `out_offset`).
    pub fn address(cie: Cie, elf_file: *Elf) u64 {
        const base: u64 = if (elf_file.section_indexes.eh_frame) |shndx|
            elf_file.sections.items(.shdr)[shndx].sh_addr
        else
            0;
        return base + cie.out_offset;
    }

    /// Raw bytes of the record (including the size cell) in the object's input data.
    pub fn data(cie: Cie, elf_file: *Elf) []u8 {
        const object = elf_file.file(cie.file_index).?.object;
        return object.eh_frame_data.items[cie.offset..][0..cie.calcSize()];
    }

    /// Total on-disk size, including the leading 4-byte size cell.
    pub fn calcSize(cie: Cie) usize {
        return cie.size + 4;
    }

    pub fn relocs(cie: Cie, elf_file: *Elf) []align(1) const elf.Elf64_Rela {
        const object = elf_file.file(cie.file_index).?.object;
        return object.relocs.items[cie.rel_index..][0..cie.rel_num];
    }

    /// Structural equality used for CIE deduplication: identical byte contents
    /// and identical relocations (record-relative offset, type, addend, and
    /// resolved target symbol).
    pub fn eql(cie: Cie, other: Cie, elf_file: *Elf) bool {
        if (!std.mem.eql(u8, cie.data(elf_file), other.data(elf_file))) return false;

        const cie_relocs = cie.relocs(elf_file);
        const other_relocs = other.relocs(elf_file);
        if (cie_relocs.len != other_relocs.len) return false;

        for (cie_relocs, other_relocs) |cie_rel, other_rel| {
            // Compare offsets relative to each record's start, since the two
            // CIEs generally live at different input offsets.
            if (cie_rel.r_offset - cie.offset != other_rel.r_offset - other.offset) return false;
            if (cie_rel.r_type() != other_rel.r_type()) return false;
            if (cie_rel.r_addend != other_rel.r_addend) return false;

            const cie_object = elf_file.file(cie.file_index).?.object;
            const cie_ref = cie_object.resolveSymbol(cie_rel.r_sym(), elf_file);
            const other_object = elf_file.file(other.file_index).?.object;
            const other_ref = other_object.resolveSymbol(other_rel.r_sym(), elf_file);
            if (!cie_ref.eql(other_ref)) return false;
        }
        return true;
    }

    pub fn fmt(cie: Cie, elf_file: *Elf) std.fmt.Alt(Format, Format.default) {
        return .{ .data = .{
            .cie = cie,
            .elf_file = elf_file,
        } };
    }

    const Format = struct {
        cie: Cie,
        elf_file: *Elf,

        fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
            const cie = f.cie;
            const elf_file = f.elf_file;
            // Note: `address` already includes `out_offset`, so it must not be
            // added again here (the previous code double-counted it).
            try writer.print("@{x} : size({x})", .{
                cie.address(elf_file),
                cie.calcSize(),
            });
            if (!cie.alive) try writer.writeAll(" : [*]");
        }
    };
};
156
pub const Iterator = struct {
    data: []const u8,
    pos: usize = 0,

    pub const Record = struct {
        tag: enum { fde, cie },
        offset: usize,
        size: usize,
    };

    /// Parses the header of the next CIE/FDE record and advances past the
    /// whole record. Returns null at the end of the data or at a zero-length
    /// terminator record.
    pub fn next(it: *Iterator) !?Record {
        if (it.pos >= it.data.len) return null;

        const remaining = it.data[it.pos..];
        if (remaining.len < 4) return error.EndOfStream;
        const size = std.mem.readInt(u32, remaining[0..4], .little);
        if (size == 0) return null;
        // 0xFFFFFFFF introduces a 64-bit DWARF length, which is not supported.
        if (size == 0xFFFFFFFF) @panic("TODO");

        if (remaining.len < 8) return error.EndOfStream;
        // A zero ID cell marks a CIE; anything else is an FDE's CIE pointer.
        const id = std.mem.readInt(u32, remaining[4..8], .little);
        const rec: Record = .{
            .tag = if (id == 0) .cie else .fde,
            .offset = it.pos,
            .size = size,
        };
        it.pos += size + 4;
        return rec;
    }
};
187
/// Lays out the output `.eh_frame` section: assigns `out_offset` to every kept
/// CIE (deduplicated) and every live FDE, and returns the total section size.
pub fn calcEhFrameSize(elf_file: *Elf) !usize {
    const comp = elf_file.base.comp;
    const gpa = comp.gpa;

    // The ZigObject's own `.eh_frame` atom, if present, comes first.
    var total: usize = if (elf_file.zigObjectPtr()) |zo| blk: {
        const sym = zo.symbol(zo.eh_frame_index orelse break :blk 0);
        break :blk math.cast(usize, sym.atom(elf_file).?.size) orelse return error.Overflow;
    } else 0;

    var unique_cies = std.array_list.Managed(Cie).init(gpa);
    defer unique_cies.deinit();

    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        outer: for (object.cies.items) |*cie| {
            for (unique_cies.items) |other| {
                if (other.eql(cie.*, elf_file)) {
                    // We already have a CIE record that has the exact same contents, so instead of
                    // duplicating them, we mark this one dead and set its output offset to be
                    // equal to that of the alive record. This way, we won't have to rewrite
                    // Fde.cie_index field when committing the records to file.
                    cie.out_offset = other.out_offset;
                    continue :outer;
                }
            }
            cie.alive = true;
            cie.out_offset = total;
            total += cie.calcSize();
            try unique_cies.append(cie.*);
        }
    }

    // Live FDEs follow all the CIEs.
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.fdes.items) |*fde| {
            if (!fde.alive) continue;
            fde.out_offset = total;
            total += fde.calcSize();
        }
    }

    if (!elf_file.base.isRelocatable()) {
        total += 4; // NULL terminator
    }

    return total;
}
236
/// Whether the `.eh_frame_hdr` binary search table will be emitted.
fn haveEhFrameHdrSearchTable(elf_file: *Elf) bool {
    // Search table generation is not implemented for the ZigObject. Also, it would be wasteful to
    // re-do this work on every single incremental update.
    return elf_file.zigObjectPtr() == null;
}
242
/// Size in bytes of the `.eh_frame_hdr` section: the fixed preamble plus,
/// when the search table is emitted, one 8-byte entry per live FDE.
pub fn calcEhFrameHdrSize(elf_file: *Elf) usize {
    // Without the search table the section is just the fixed 8-byte preamble.
    if (!haveEhFrameHdrSearchTable(elf_file)) return 8;
    var num_fdes: usize = 0;
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.fdes.items) |fde| {
            if (fde.alive) num_fdes += 1;
        }
    }
    return 12 + num_fdes * 8;
}
254
/// Counts relocations that will be emitted for `.eh_frame` contents: those of
/// the ZigObject's eh_frame atom plus those of every live CIE and FDE.
pub fn calcEhFrameRelocs(elf_file: *Elf) usize {
    var total: usize = 0;
    if (elf_file.zigObjectPtr()) |zo| zo: {
        const sym_index = zo.eh_frame_index orelse break :zo;
        const atom_ptr = zo.atom(zo.symbol(sym_index).ref.index).?;
        if (!atom_ptr.alive) break :zo;
        total += atom_ptr.relocs(elf_file).len;
    }
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.cies.items) |cie| {
            if (cie.alive) total += cie.relocs(elf_file).len;
        }
        for (object.fdes.items) |fde| {
            if (fde.alive) total += fde.relocs(object).len;
        }
    }
    return total;
}
277
/// Resolves one relocation inside an eh_frame record (`rec` is either a `Cie`
/// or an `Fde`), writing the result into `contents` (the record's bytes).
/// Dispatches to the per-architecture resolver with P (place), S (symbol
/// address), and A (addend) in the usual ELF relocation notation.
fn resolveReloc(rec: anytype, sym: *const Symbol, rel: elf.Elf64_Rela, elf_file: *Elf, contents: []u8) !void {
    const cpu_arch = elf_file.getTarget().cpu.arch;
    // r_offset is relative to the start of the object's input `.eh_frame`
    // data; rebase it onto this record. (Use the file-local `math` alias
    // consistently — the previous code mixed `std.math.cast` and `math.cast`.)
    const offset = math.cast(usize, rel.r_offset - rec.offset) orelse return error.Overflow;
    const P = math.cast(i64, rec.address(elf_file) + offset) orelse return error.Overflow;
    const S = math.cast(i64, sym.address(.{}, elf_file)) orelse return error.Overflow;
    const A = rel.r_addend;

    relocs_log.debug(" {f}: {x}: [{x} => {x}] ({s})", .{
        relocation.fmtRelocType(rel.r_type(), cpu_arch),
        offset,
        P,
        S + A,
        sym.name(elf_file),
    });

    switch (cpu_arch) {
        .x86_64 => try x86_64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
        .aarch64, .aarch64_be => try aarch64.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
        .riscv64, .riscv64be => try riscv.resolveReloc(rec, elf_file, rel, P, S + A, contents[offset..]),
        else => return error.UnsupportedCpuArch,
    }
}
300
/// Writes the final (non-relocatable) `.eh_frame` section: all live CIEs, then
/// all live FDEs with relocations applied and CIE pointers rewritten, then a
/// 4-byte NULL terminator record.
pub fn writeEhFrame(elf_file: *Elf, writer: anytype) !void {
    relocs_log.debug("{x}: .eh_frame", .{
        elf_file.sections.items(.shdr)[elf_file.section_indexes.eh_frame.?].sh_addr,
    });

    var any_reloc_failure = false;

    // CIEs come first, in the order their output offsets were assigned.
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.cies.items) |cie| {
            if (!cie.alive) continue;
            const bytes = cie.data(elf_file);
            for (cie.relocs(elf_file)) |rel| {
                const ref = object.resolveSymbol(rel.r_sym(), elf_file);
                const target = elf_file.symbol(ref).?;
                resolveReloc(cie, target, rel, elf_file, bytes) catch |err| switch (err) {
                    error.RelocFailure => any_reloc_failure = true,
                    else => |e| return e,
                };
            }
            try writer.writeAll(bytes);
        }
    }

    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.fdes.items) |fde| {
            if (!fde.alive) continue;
            const bytes = fde.data(object);

            // Rewrite the CIE pointer to the distance back to the (possibly
            // deduplicated) CIE in the output section.
            const cie_delta = @as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(object).out_offset));
            std.mem.writeInt(i32, bytes[4..8], @truncate(cie_delta), .little);

            for (fde.relocs(object)) |rel| {
                const ref = object.resolveSymbol(rel.r_sym(), elf_file);
                const target = elf_file.symbol(ref).?;
                resolveReloc(fde, target, rel, elf_file, bytes) catch |err| switch (err) {
                    error.RelocFailure => any_reloc_failure = true,
                    else => |e| return e,
                };
            }
            try writer.writeAll(bytes);
        }
    }

    // Zero-length record terminates the section.
    try writer.writeInt(u32, 0, .little);

    if (any_reloc_failure) return error.RelocFailure;
}
361
/// Writes `.eh_frame` for a relocatable output: record bytes are copied
/// verbatim (relocations are emitted separately), with only the FDE CIE
/// pointers rewritten; no NULL terminator is appended.
pub fn writeEhFrameRelocatable(elf_file: *Elf, writer: anytype) !void {
    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.cies.items) |cie| {
            if (cie.alive) try writer.writeAll(cie.data(elf_file));
        }
    }

    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;
        for (object.fdes.items) |fde| {
            if (!fde.alive) continue;
            const bytes = fde.data(object);

            // Rewrite the CIE pointer to the distance back to the (possibly
            // deduplicated) CIE in the output section.
            const cie_delta = @as(i64, @intCast(fde.out_offset + 4)) - @as(i64, @intCast(fde.cie(object).out_offset));
            std.mem.writeInt(i32, bytes[4..8], @truncate(cie_delta), .little);

            try writer.writeAll(bytes);
        }
    }
}
391
/// Converts the input relocation `rel` into an output relocation record placed
/// at `r_offset`, resolving `sym` to an output symbol (or section) index.
/// Used by `writeEhFrameRelocs`.
fn emitReloc(elf_file: *Elf, r_offset: u64, sym: *const Symbol, rel: elf.Elf64_Rela) elf.Elf64_Rela {
    const cpu_arch = elf_file.getTarget().cpu.arch;
    const r_type = rel.r_type();
    var r_addend = rel.r_addend;
    var r_sym: u32 = 0;
    switch (sym.type(elf_file)) {
        elf.STT_SECTION => {
            // Section symbols: point the relocation at the output section
            // index and fold the symbol's address into the addend.
            r_addend += @intCast(sym.address(.{}, elf_file));
            r_sym = sym.outputShndx(elf_file).?;
        },
        else => {
            // 0 (STN_UNDEF) when the symbol has no output symtab index.
            r_sym = sym.outputSymtabIndex(elf_file) orelse 0;
        },
    }

    relocs_log.debug(" {f}: [{x} => {d}({s})] + {x}", .{
        relocation.fmtRelocType(r_type, cpu_arch),
        r_offset,
        r_sym,
        sym.name(elf_file),
        r_addend,
    });

    return .{
        .r_offset = r_offset,
        .r_addend = r_addend,
        // ELF64 r_info packs the symbol index into the upper 32 bits and the
        // relocation type into the lower 32 bits.
        .r_info = (@as(u64, @intCast(r_sym)) << 32) | r_type,
    };
}
421
/// Collects output relocations for `.eh_frame`: those from the ZigObject's
/// eh_frame atom, then those of every live CIE and FDE, with input-relative
/// offsets translated to output addresses.
pub fn writeEhFrameRelocs(elf_file: *Elf, relocs: *std.array_list.Managed(elf.Elf64_Rela)) !void {
    relocs_log.debug("{x}: .eh_frame", .{
        elf_file.sections.items(.shdr)[elf_file.section_indexes.eh_frame.?].sh_addr,
    });

    if (elf_file.zigObjectPtr()) |zo| zo: {
        const sym_index = zo.eh_frame_index orelse break :zo;
        const atom_ptr = zo.atom(zo.symbol(sym_index).ref.index).?;
        if (!atom_ptr.alive) break :zo;
        for (atom_ptr.relocs(elf_file)) |rel| {
            const ref = zo.resolveSymbol(rel.r_sym(), elf_file);
            const target = elf_file.symbol(ref).?;
            try relocs.append(emitReloc(elf_file, rel.r_offset, target, rel));
        }
    }

    for (elf_file.objects.items) |index| {
        const object = elf_file.file(index).?.object;

        for (object.cies.items) |cie| {
            if (!cie.alive) continue;
            for (cie.relocs(elf_file)) |rel| {
                const ref = object.resolveSymbol(rel.r_sym(), elf_file);
                const target = elf_file.symbol(ref).?;
                // Rebase the input-relative r_offset onto the record's output address.
                const r_offset = cie.address(elf_file) + rel.r_offset - cie.offset;
                try relocs.append(emitReloc(elf_file, r_offset, target, rel));
            }
        }

        for (object.fdes.items) |fde| {
            if (!fde.alive) continue;
            for (fde.relocs(object)) |rel| {
                const ref = object.resolveSymbol(rel.r_sym(), elf_file);
                const target = elf_file.symbol(ref).?;
                const r_offset = fde.address(elf_file) + rel.r_offset - fde.offset;
                try relocs.append(emitReloc(elf_file, r_offset, target, rel));
            }
        }
    }
}
463
/// Writes the `.eh_frame_hdr` section: a fixed preamble with a pc-relative
/// pointer to `.eh_frame`, optionally followed by a sorted binary search table
/// mapping function start addresses to their FDEs.
pub fn writeEhFrameHdr(elf_file: *Elf, writer: anytype) !void {
    const endian = elf_file.getTarget().cpu.arch.endian();
    const have_table = haveEhFrameHdrSearchTable(elf_file);

    try writer.writeByte(1); // version
    try writer.writeByte(@bitCast(@as(DW_EH_PE, .{ .type = .sdata4, .rel = .pcrel }))); // eh_frame_ptr_enc
    if (have_table) {
        try writer.writeByte(@bitCast(@as(DW_EH_PE, .{ .type = .udata4, .rel = .abs }))); // fde_count_enc
        try writer.writeByte(@bitCast(@as(DW_EH_PE, .{ .type = .sdata4, .rel = .datarel }))); // table_enc
    } else {
        // Both encodings omitted: consumers see no count and no table.
        try writer.writeByte(@bitCast(DW_EH_PE.omit)); // fde_count_enc
        try writer.writeByte(@bitCast(DW_EH_PE.omit)); // table_enc
    }

    const shdrs = elf_file.sections.items(.shdr);
    const eh_frame_shdr = shdrs[elf_file.section_indexes.eh_frame.?];
    const eh_frame_hdr_shdr = shdrs[elf_file.section_indexes.eh_frame_hdr.?];
    // eh_frame_ptr: pcrel to the field itself, which sits 4 bytes into the
    // `.eh_frame_hdr` section (hence the `- 4`).
    try writer.writeInt(
        u32,
        @as(u32, @bitCast(@as(
            i32,
            @truncate(@as(i64, @intCast(eh_frame_shdr.sh_addr)) - @as(i64, @intCast(eh_frame_hdr_shdr.sh_addr)) - 4),
        ))),
        .little,
    );

    if (!have_table) return;

    const gpa = elf_file.base.comp.gpa;

    // This must be an `extern struct` because we will write the bytes directly to the file.
    const Entry = extern struct {
        first_pc_rel: i32,
        fde_addr_rel: i32,
        fn lessThan(_: void, lhs: @This(), rhs: @This()) bool {
            return lhs.first_pc_rel < rhs.first_pc_rel;
        }
    };
    // The number of entries was already computed by `calcEhFrameHdrSize`.
    const num_fdes: u32 = @intCast(@divExact(eh_frame_hdr_shdr.sh_size - 12, 8));
    try writer.writeInt(u32, num_fdes, endian);

    var entries: std.ArrayList(Entry) = try .initCapacity(gpa, num_fdes);
    defer entries.deinit(gpa);
    for (elf_file.objects.items) |file_index| {
        const object = elf_file.file(file_index).?.object;
        for (object.fdes.items) |fde| {
            if (!fde.alive) continue;
            const relocs = fde.relocs(object);
            // Should `relocs.len == 0` be an error? Things are completely broken anyhow in that case...
            // The first relocation targets the FDE's PC-begin field.
            const rel = relocs[0];
            const ref = object.resolveSymbol(rel.r_sym(), elf_file);
            const sym = elf_file.symbol(ref).?;
            // Both table columns are datarel (relative to `.eh_frame_hdr`).
            const fde_addr_abs: i64 = @intCast(fde.address(elf_file));
            const fde_addr_rel: i64 = fde_addr_abs - @as(i64, @intCast(eh_frame_hdr_shdr.sh_addr));
            const first_pc_abs: i64 = @as(i64, @intCast(sym.address(.{}, elf_file))) + rel.r_addend;
            const first_pc_rel: i64 = first_pc_abs - @as(i64, @intCast(eh_frame_hdr_shdr.sh_addr));
            entries.appendAssumeCapacity(.{
                .first_pc_rel = @truncate(first_pc_rel),
                .fde_addr_rel = @truncate(fde_addr_rel),
            });
        }
    }
    assert(entries.items.len == num_fdes);
    // The table must be sorted by initial PC so consumers can binary-search it.
    std.mem.sort(Entry, entries.items, {}, Entry.lessThan);
    if (endian != builtin.cpu.arch.endian()) {
        std.mem.byteSwapAllElements(Entry, entries.items);
    }
    try writer.writeAll(@ptrCast(entries.items));
}
535
const x86_64 = struct {
    /// Applies one x86_64 relocation to `data` (the bytes at the relocated
    /// field). `source` is P (the field's runtime address) and `target` is
    /// S + A (symbol address plus addend).
    fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
        const r_type: elf.R_X86_64 = @enumFromInt(rel.r_type());
        switch (r_type) {
            .NONE => {},
            .@"32" => std.mem.writeInt(i32, data[0..4], @as(i32, @truncate(target)), .little),
            .@"64" => std.mem.writeInt(i64, data[0..8], target, .little),
            // @intCast asserts the pc-relative displacement fits in 32 bits.
            .PC32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
            .PC64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
            else => try reportInvalidReloc(rec, elf_file, rel),
        }
    }
};
549
const aarch64 = struct {
    /// Applies one aarch64 relocation to `data` (the bytes at the relocated
    /// field). `source` is P (the field's runtime address) and `target` is
    /// S + A (symbol address plus addend).
    fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
        const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
        switch (r_type) {
            .NONE => {},
            .ABS64 => std.mem.writeInt(i64, data[0..8], target, .little),
            // @intCast asserts the pc-relative displacement fits in 32 bits.
            .PREL32 => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
            .PREL64 => std.mem.writeInt(i64, data[0..8], target - source, .little),
            else => try reportInvalidReloc(rec, elf_file, rel),
        }
    }
};
562
const riscv = struct {
    /// Applies one riscv64 relocation to `data` (the bytes at the relocated
    /// field). `source` is P (the field's runtime address) and `target` is
    /// S + A (symbol address plus addend).
    fn resolveReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela, source: i64, target: i64, data: []u8) !void {
        const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
        switch (r_type) {
            .NONE => {},
            // @intCast asserts the pc-relative displacement fits in 32 bits.
            .@"32_PCREL" => std.mem.writeInt(i32, data[0..4], @as(i32, @intCast(target - source)), .little),
            else => try reportInvalidReloc(rec, elf_file, rel),
        }
    }
};
573
/// Records an "invalid relocation type" diagnostic for the given eh_frame
/// record's file and always returns `error.RelocFailure`.
fn reportInvalidReloc(rec: anytype, elf_file: *Elf, rel: elf.Elf64_Rela) !void {
    const arch = elf_file.getTarget().cpu.arch;
    const diags = &elf_file.base.comp.link_diags;
    var err = try diags.addErrorWithNotes(1);
    try err.addMsg("invalid relocation type {f} at offset 0x{x}", .{
        relocation.fmtRelocType(rel.r_type(), arch),
        rel.r_offset,
    });
    err.addNote("in {f}:.eh_frame", .{elf_file.file(rec.file_index).?.fmtPath()});
    return error.RelocFailure;
}
584
585const std = @import("std");
586const assert = std.debug.assert;
587const elf = std.elf;
588const math = std.math;
589const relocs_log = std.log.scoped(.link_relocs);
590const relocation = @import("relocation.zig");
591
592const Allocator = std.mem.Allocator;
593const Atom = @import("Atom.zig");
594const DW_EH_PE = std.dwarf.EH.PE;
595const Elf = @import("../Elf.zig");
596const Object = @import("Object.zig");
597const Symbol = @import("Symbol.zig");
598
599const builtin = @import("builtin");