/// Address offset allocated for this Atom relative to its section start address.
value: u64 = 0,

/// Name of this Atom.
name: MachO.String = .{},

/// Index into linker's input file table.
file: File.Index = 0,

/// Size of this atom.
size: u64 = 0,

/// Alignment of this atom as a power of two.
alignment: Alignment = .@"1",

/// Index of the input section.
n_sect: u32 = 0,

/// Index of the output section.
out_n_sect: u8 = 0,

/// Offset within the parent section pointed to by n_sect.
/// off + size <= parent section size.
off: u64 = 0,

/// Index of this atom in the linker's atoms table.
atom_index: Index = 0,

/// Specifies whether this atom is alive or has been garbage collected.
alive: AtomicBool = AtomicBool.init(true),

/// Specifies if this atom has been visited during garbage collection.
visited: AtomicBool = AtomicBool.init(false),

/// Points to the previous and next neighbors, based on the atom's `value`.
/// This can be used to find, for example, the capacity of this `Atom`.
prev_index: Index = 0,
next_index: Index = 0,

/// Index into the owning file's atom extras table (see `Extra`).
extra: u32 = 0,

pub fn getName(self: Atom, macho_file: *MachO) [:0]const u8 {
    return switch (self.getFile(macho_file)) {
        .dylib => unreachable,
        inline else => |x| x.getString(self.name),
    };
}

pub fn getFile(self: Atom, macho_file: *MachO) File {
    return macho_file.getFile(self.file).?;
}

pub fn getRelocs(self: Atom, macho_file: *MachO) []const Relocation {
    return switch (self.getFile(macho_file)) {
        .dylib => unreachable,
        inline else => |x| x.getAtomRelocs(self, macho_file),
    };
}

pub fn getInputSection(self: Atom, macho_file: *MachO) macho.section_64 {
    return switch (self.getFile(macho_file)) {
        .dylib => unreachable,
        .zig_object => |x| x.getInputSection(self, macho_file),
        .object => |x| x.sections.items(.header)[self.n_sect],
        .internal => |x| x.sections.items(.header)[self.n_sect],
    };
}

pub fn getInputAddress(self: Atom, macho_file: *MachO) u64 {
    return self.getInputSection(macho_file).addr + self.off;
}

pub fn getAddress(self: Atom, macho_file: *MachO) u64 {
    const header = macho_file.sections.items(.header)[self.out_n_sect];
    return header.addr + self.value;
}

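/// Returns this atom's priority: the owning file's index in the upper 32 bits and the
/// input section index in the lower 32 bits, so atoms compare first by file, then by
/// input section.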
pub fn getPriority(self: Atom, macho_file: *MachO) u64 {
    const file = self.getFile(macho_file);
    return (@as(u64, @intCast(file.getIndex())) << 32) | @as(u64, @intCast(self.n_sect));
}

pub fn getUnwindRecords(self: Atom, macho_file: *MachO) []const UnwindInfo.Record.Index {
    const extra = self.getExtra(macho_file);
    return switch (self.getFile(macho_file)) {
        .dylib => unreachable,
        .zig_object, .internal => &[0]UnwindInfo.Record.Index{},
        .object => |x| x.unwind_records_indexes.items[extra.unwind_index..][0..extra.unwind_count],
    };
}

pub fn markUnwindRecordsDead(self: Atom, macho_file: *MachO) void {
    const object = self.getFile(macho_file).object;
    for (self.getUnwindRecords(macho_file)) |cu_index| {
        const cu = object.getUnwindRecord(cu_index);
        cu.alive = false;

        if (cu.getFdePtr(macho_file)) |fde| {
            fde.alive = false;
        }
    }
}

pub fn isAlive(self: Atom) bool {
    return self.alive.load(.seq_cst);
}

pub fn setAlive(self: *Atom, alive: bool) void {
    _ = self.alive.swap(alive, .seq_cst);
}

pub fn getThunk(self: Atom, macho_file: *MachO) *Thunk {
    const extra = self.getExtra(macho_file);
    return macho_file.getThunk(extra.thunk);
}

const AddExtraOpts = struct {
    thunk: ?u32 = null,
    rel_index: ?u32 = null,
    rel_count: ?u32 = null,
    rel_out_index: ?u32 = null,
    rel_out_count: ?u32 = null,
    unwind_index: ?u32 = null,
    unwind_count: ?u32 = null,
    literal_pool_index: ?u32 = null,
    literal_symbol_index: ?u32 = null,
};

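/// Merges any non-null fields of `opts` into this atom's `Extra` record stored in its
/// owning file.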
pub fn addExtra(atom: *Atom, opts: AddExtraOpts, macho_file: *MachO) void {
    const file = atom.getFile(macho_file);
    var extra = file.getAtomExtra(atom.extra);
    inline for (@typeInfo(@TypeOf(opts)).@"struct".fields) |field| {
        if (@field(opts, field.name)) |x| {
            @field(extra, field.name) = x;
        }
    }
    file.setAtomExtra(atom.extra, extra);
}

pub inline fn getExtra(atom: Atom, macho_file: *MachO) Extra {
    return atom.getFile(macho_file).getAtomExtra(atom.extra);
}

pub inline fn setExtra(atom: Atom, extra: Extra, macho_file: *MachO) void {
    atom.getFile(macho_file).setAtomExtra(atom.extra, extra);
}

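/// Maps an input section header to an output section index, creating the output section
/// if it does not exist yet. For relocatable output the input section is preserved as-is;
/// otherwise well-known section types are folded into canonical output sections
/// (e.g. literal sections into __TEXT,__const, C strings into __TEXT,__cstring).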
pub fn initOutputSection(sect: macho.section_64, macho_file: *MachO) !u8 {
    if (macho_file.base.isRelocatable()) {
        const osec = macho_file.getSectionByName(sect.segName(), sect.sectName()) orelse
            try macho_file.addSection(
                sect.segName(),
                sect.sectName(),
                .{ .flags = sect.flags },
            );
        return osec;
    }

    const segname, const sectname, const flags = blk: {
        if (sect.isCode()) break :blk .{
            "__TEXT",
            "__text",
            macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        };

        switch (sect.type()) {
            macho.S_4BYTE_LITERALS,
            macho.S_8BYTE_LITERALS,
            macho.S_16BYTE_LITERALS,
            => break :blk .{ "__TEXT", "__const", macho.S_REGULAR },

            macho.S_CSTRING_LITERALS => {
                if (mem.startsWith(u8, sect.sectName(), "__objc")) break :blk .{
                    sect.segName(), sect.sectName(), macho.S_REGULAR,
                };
                break :blk .{ "__TEXT", "__cstring", macho.S_CSTRING_LITERALS };
            },

            macho.S_MOD_INIT_FUNC_POINTERS,
            macho.S_MOD_TERM_FUNC_POINTERS,
            => break :blk .{ "__DATA_CONST", sect.sectName(), sect.flags },

            macho.S_LITERAL_POINTERS,
            macho.S_ZEROFILL,
            macho.S_GB_ZEROFILL,
            macho.S_THREAD_LOCAL_VARIABLES,
            macho.S_THREAD_LOCAL_VARIABLE_POINTERS,
            macho.S_THREAD_LOCAL_REGULAR,
            macho.S_THREAD_LOCAL_ZEROFILL,
            => break :blk .{ sect.segName(), sect.sectName(), sect.flags },

            macho.S_COALESCED => break :blk .{
                sect.segName(),
                sect.sectName(),
                macho.S_REGULAR,
            },

            macho.S_REGULAR => {
                const segname = sect.segName();
                const sectname = sect.sectName();
                if (mem.eql(u8, segname, "__DATA")) {
                    if (mem.eql(u8, sectname, "__cfstring") or
                        mem.eql(u8, sectname, "__objc_classlist") or
                        mem.eql(u8, sectname, "__objc_imageinfo")) break :blk .{
                        "__DATA_CONST",
                        sectname,
                        macho.S_REGULAR,
                    };
                }
                break :blk .{ segname, sectname, sect.flags };
            },

            else => break :blk .{ sect.segName(), sect.sectName(), sect.flags },
        }
    };
    return macho_file.getSectionByName(segname, sectname) orelse try macho_file.addSection(
        segname,
        sectname,
        .{ .flags = flags },
    );
}

/// Returns how much room there is to grow in virtual address space.
/// File offset relocation happens transparently, so it is not included in
/// this calculation.
pub fn capacity(self: Atom, macho_file: *MachO) u64 {
    const zo = macho_file.getZigObject().?;
    const next_addr = if (zo.getAtom(self.next_index)) |next|
        next.getAddress(macho_file)
    else
        std.math.maxInt(u32);
    return next_addr - self.getAddress(macho_file);
}

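/// Returns true when this atom has enough surplus capacity behind it (beyond its ideal
/// padded size) to be worth tracking on the section's free list.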
pub fn freeListEligible(self: Atom, macho_file: *MachO) bool {
    // No need to keep a free list node for the last block.
    const zo = macho_file.getZigObject().?;
    const next = zo.getAtom(self.next_index) orelse return false;
    const cap = next.getAddress(macho_file) - self.getAddress(macho_file);
    const ideal_cap = MachO.padToIdeal(self.size);
    if (cap <= ideal_cap) return false;
    const surplus = cap - ideal_cap;
    return surplus >= MachO.min_text_capacity;
}

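/// Assigns this atom a virtual address within its output section, either by reusing
/// surplus capacity recorded on the section's free list or by appending after the
/// section's last atom (growing the section if needed), then links the atom into the
/// section's neighbor chain.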
pub fn allocate(self: *Atom, macho_file: *MachO) !void {
    const zo = macho_file.getZigObject().?;
    const sect = &macho_file.sections.items(.header)[self.out_n_sect];
    const free_list = &macho_file.sections.items(.free_list)[self.out_n_sect];
    const last_atom_index = &macho_file.sections.items(.last_atom_index)[self.out_n_sect];
    const new_atom_ideal_capacity = MachO.padToIdeal(self.size);

    // We use these to indicate our intention to update metadata, placing the new atom,
    // and possibly removing a free list node.
    // It would be simpler to do it inside the loop below, but that would cause a
    // problem if an error was returned later in the function. So this action
    // is actually carried out at the end of the function, when errors are no longer possible.
    var atom_placement: ?Atom.Index = null;
    var free_list_removal: ?usize = null;

    // First we look for an appropriately sized free list node.
    // The list is unordered. We'll just take the first thing that works.
    self.value = blk: {
        var i: usize = 0;
        while (i < free_list.items.len) {
            const big_atom_index = free_list.items[i];
            const big_atom = zo.getAtom(big_atom_index).?;
            // We now have a pointer to a live atom that has too much capacity.
            // Is it enough that we could fit this new atom?
            const cap = big_atom.capacity(macho_file);
            const ideal_capacity = MachO.padToIdeal(cap);
            const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
            const capacity_end_vaddr = big_atom.value + cap;
            const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
            const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
            if (new_start_vaddr < ideal_capacity_end_vaddr) {
                // Additional bookkeeping here to notice if this free list node
                // should be deleted because the block that it points to has grown to take up
                // more of the extra capacity.
                if (!big_atom.freeListEligible(macho_file)) {
                    _ = free_list.swapRemove(i);
                } else {
                    i += 1;
                }
                continue;
            }
            // At this point we know that we will place the new block here. But the
            // remaining question is whether there is still yet enough capacity left
            // over for there to still be a free list node.
            const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
            const keep_free_list_node = remaining_capacity >= MachO.min_text_capacity;

            // Set up the metadata to be updated, after errors are no longer possible.
            atom_placement = big_atom_index;
            if (!keep_free_list_node) {
                free_list_removal = i;
            }
            break :blk new_start_vaddr;
        } else if (zo.getAtom(last_atom_index.*)) |last| {
            const ideal_capacity = MachO.padToIdeal(last.size);
            const ideal_capacity_end_vaddr = last.value + ideal_capacity;
            const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
            // Set up the metadata to be updated, after errors are no longer possible.
            atom_placement = last.atom_index;
            break :blk new_start_vaddr;
        } else {
            break :blk 0;
        }
    };

    log.debug("allocated atom({d}) : '{s}' at 0x{x} to 0x{x}", .{
        self.atom_index,
        self.getName(macho_file),
        self.getAddress(macho_file),
        self.getAddress(macho_file) + self.size,
    });

    const expand_section = if (atom_placement) |placement_index|
        zo.getAtom(placement_index).?.next_index == 0
    else
        true;
    if (expand_section) {
        const needed_size = self.value + self.size;
        try macho_file.growSection(self.out_n_sect, needed_size);
        last_atom_index.* = self.atom_index;

        // const zig_object = macho_file.getZigObject().?;
        // if (zig_object.dwarf) |_| {
        //     // The .debug_info section has `low_pc` and `high_pc` values which is the virtual address
        //     // range of the compilation unit. When we expand the text section, this range changes,
        //     // so the DW_TAG.compile_unit tag of the .debug_info section becomes dirty.
        //     zig_object.debug_info_header_dirty = true;
        //     // This becomes dirty for the same reason. We could potentially make this more
        //     // fine-grained with the addition of support for more compilation units. It is planned to
        //     // model each package as a different compilation unit.
        //     zig_object.debug_aranges_section_dirty = true;
        // }
    }
    sect.@"align" = @max(sect.@"align", self.alignment.toLog2Units());

    // This function can also reallocate an atom.
    // In this case we need to "unplug" it from its previous location before
    // plugging it in to its new location.
    if (zo.getAtom(self.prev_index)) |prev| {
        prev.next_index = self.next_index;
    }
    if (zo.getAtom(self.next_index)) |next| {
        next.prev_index = self.prev_index;
    }

    if (atom_placement) |big_atom_index| {
        const big_atom = zo.getAtom(big_atom_index).?;
        self.prev_index = big_atom_index;
        self.next_index = big_atom.next_index;
        big_atom.next_index = self.atom_index;
    } else {
        self.prev_index = 0;
        self.next_index = 0;
    }
    if (free_list_removal) |i| {
        _ = free_list.swapRemove(i);
    }

    self.setAlive(true);
}

pub fn shrink(self: *Atom, macho_file: *MachO) void {
    _ = self;
    _ = macho_file;
}

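/// Reallocates this atom if it no longer fits where it is: that is, when its current
/// address is no longer properly aligned or its size exceeds the capacity available
/// before the next atom.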
pub fn grow(self: *Atom, macho_file: *MachO) !void {
    if (!self.alignment.check(self.value) or self.size > self.capacity(macho_file))
        try self.allocate(macho_file);
}

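/// Unlinks this atom from its section: removes it from the free list, updates the
/// section's last-atom index, reconnects its previous and next neighbors, frees its
/// relocations, and resets the atom to its default state.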
pub fn free(self: *Atom, macho_file: *MachO) void {
    log.debug("freeAtom {d} ({s})", .{ self.atom_index, self.getName(macho_file) });

    const comp = macho_file.base.comp;
    const gpa = comp.gpa;
    const zo = macho_file.getZigObject().?;
    const free_list = &macho_file.sections.items(.free_list)[self.out_n_sect];
    const last_atom_index = &macho_file.sections.items(.last_atom_index)[self.out_n_sect];
    var already_have_free_list_node = false;
    {
        var i: usize = 0;
        // TODO turn free_list into a hash map
        while (i < free_list.items.len) {
            if (free_list.items[i] == self.atom_index) {
                _ = free_list.swapRemove(i);
                continue;
            }
            if (free_list.items[i] == self.prev_index) {
                already_have_free_list_node = true;
            }
            i += 1;
        }
    }

    if (zo.getAtom(last_atom_index.*)) |last_atom| {
        if (last_atom.atom_index == self.atom_index) {
            if (zo.getAtom(self.prev_index)) |_| {
                // TODO shrink the section size here
                last_atom_index.* = self.prev_index;
            } else {
                last_atom_index.* = 0;
            }
        }
    }

    if (zo.getAtom(self.prev_index)) |prev| {
        prev.next_index = self.next_index;
        if (!already_have_free_list_node and prev.freeListEligible(macho_file)) {
            // The free list is a heuristic; it doesn't have to be perfect, so we can
            // ignore the OOM here.
            free_list.append(gpa, prev.atom_index) catch {};
        }
    } else {
        self.prev_index = 0;
    }

    if (zo.getAtom(self.next_index)) |next| {
        next.prev_index = self.prev_index;
    } else {
        self.next_index = 0;
    }

    // TODO create relocs free list
    self.freeRelocs(macho_file);
    // TODO figure out how to free input section mapping in ZigModule
    // const zig_object = macho_file.zigObjectPtr().?
    // assert(zig_object.atoms.swapRemove(self.atom_index));
    self.* = .{};
}

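/// Appends a relocation to this atom's relocation list in the owning ZigObject and
/// bumps the atom's `rel_count`. Only valid for atoms owned by the ZigObject.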
pub fn addReloc(self: *Atom, macho_file: *MachO, reloc: Relocation) !void {
    const gpa = macho_file.base.comp.gpa;
    const file = self.getFile(macho_file);
    assert(file == .zig_object);
    var extra = self.getExtra(macho_file);
    const rels = &file.zig_object.relocs.items[extra.rel_index];
    try rels.append(gpa, reloc);
    extra.rel_count += 1;
    self.setExtra(extra, macho_file);
}

pub fn freeRelocs(self: *Atom, macho_file: *MachO) void {
    self.getFile(macho_file).zig_object.freeAtomRelocs(self.*, macho_file);
    var extra = self.getExtra(macho_file);
    extra.rel_count = 0;
    self.setExtra(extra, macho_file);
}

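/// Scans this atom's relocations to decide which synthetic entries each target symbol
/// needs (stubs, GOT entries, TLV pointers), and records whether the output binds to
/// weak symbols or uses thread-local storage. References to undefined symbols are
/// collected for later error reporting.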
pub fn scanRelocs(self: Atom, macho_file: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();
    assert(self.isAlive());

    const relocs = self.getRelocs(macho_file);

    for (relocs) |rel| {
        if (try self.reportUndefSymbol(rel, macho_file)) continue;

        switch (rel.type) {
            .branch => {
                const symbol = rel.getTargetSymbol(self, macho_file);
                if (symbol.flags.import or (symbol.flags.@"export" and symbol.flags.weak) or symbol.flags.interposable) {
                    symbol.setSectionFlags(.{ .stubs = true });
                    if (symbol.flags.weak) {
                        macho_file.binds_to_weak.store(true, .seq_cst);
                    }
                } else if (mem.startsWith(u8, symbol.getName(macho_file), "_objc_msgSend$")) {
                    symbol.setSectionFlags(.{ .objc_stubs = true });
                }
            },

            .got_load,
            .got_load_page,
            .got_load_pageoff,
            => {
                const symbol = rel.getTargetSymbol(self, macho_file);
                if (symbol.flags.import or
                    (symbol.flags.@"export" and symbol.flags.weak) or
                    symbol.flags.interposable or
                    macho_file.getTarget().cpu.arch == .aarch64) // TODO relax on arm64
                {
                    symbol.setSectionFlags(.{ .needs_got = true });
                    if (symbol.flags.weak) {
                        macho_file.binds_to_weak.store(true, .seq_cst);
                    }
                }
            },

            .got => {
                rel.getTargetSymbol(self, macho_file).setSectionFlags(.{ .needs_got = true });
            },

            .tlv,
            .tlvp_page,
            .tlvp_pageoff,
            => {
                const symbol = rel.getTargetSymbol(self, macho_file);
                if (!symbol.flags.tlv) {
                    try macho_file.reportParseError2(
                        self.getFile(macho_file).getIndex(),
                        "{s}: illegal thread-local variable reference to regular symbol {s}",
                        .{ self.getName(macho_file), symbol.getName(macho_file) },
                    );
                }
                if (symbol.flags.import or (symbol.flags.@"export" and symbol.flags.weak) or symbol.flags.interposable) {
                    symbol.setSectionFlags(.{ .tlv_ptr = true });
                    if (symbol.flags.weak) {
                        macho_file.binds_to_weak.store(true, .seq_cst);
                    }
                }
            },

            .unsigned => {
                if (rel.meta.length == 3) { // TODO this really should check if this is pointer width
                    if (rel.tag == .@"extern") {
                        const symbol = rel.getTargetSymbol(self, macho_file);
                        if (symbol.isTlvInit(macho_file)) {
                            macho_file.has_tlv.store(true, .seq_cst);
                            continue;
                        }
                        if (symbol.flags.import) {
                            if (symbol.flags.weak) {
                                macho_file.binds_to_weak.store(true, .seq_cst);
                            }
                            continue;
                        }
                        if (symbol.flags.@"export" and symbol.flags.weak) {
                            macho_file.binds_to_weak.store(true, .seq_cst);
                        }
                    }
                }
            },

            .signed,
            .signed1,
            .signed2,
            .signed4,
            .page,
            .pageoff,
            .subtractor,
            => {},
        }
    }
}

fn reportUndefSymbol(self: Atom, rel: Relocation, macho_file: *MachO) !bool {
    if (rel.tag == .local) return false;

    const file = self.getFile(macho_file);
    const ref = file.getSymbolRef(rel.target, macho_file);
    if (ref.getFile(macho_file) == null) {
        macho_file.undefs_mutex.lock();
        defer macho_file.undefs_mutex.unlock();
        const gpa = macho_file.base.comp.gpa;
        const gop = try macho_file.undefs.getOrPut(gpa, file.getGlobals()[rel.target]);
        if (!gop.found_existing) {
            gop.value_ptr.* = .{ .refs = .{} };
        }
        try gop.value_ptr.refs.append(gpa, .{ .index = self.atom_index, .file = self.file });
        return true;
    }

    return false;
}

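/// Applies this atom's relocations to `buffer`, which holds the atom's code copied from
/// the input file. Relaxation failures are reported through the linker's diagnostics and
/// surface as error.ResolveFailed once all relocations have been attempted.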
pub fn resolveRelocs(self: Atom, macho_file: *MachO, buffer: []u8) !void {
    const tracy = trace(@src());
    defer tracy.end();

    assert(!self.getInputSection(macho_file).isZerofill());
    const file = self.getFile(macho_file);
    const name = self.getName(macho_file);
    const relocs = self.getRelocs(macho_file);

    relocs_log.debug("{x}: {s}", .{ self.value, name });

    var has_error = false;
    var stream: Writer = .fixed(buffer);
    var i: usize = 0;
    while (i < relocs.len) : (i += 1) {
        const rel = relocs[i];
        const rel_offset: usize = @intCast(rel.offset - self.off);
        const subtractor = if (rel.meta.has_subtractor) relocs[i - 1] else null;

        if (rel.tag == .@"extern") {
            if (rel.getTargetSymbol(self, macho_file).getFile(macho_file) == null) continue;
        }

        stream.end = rel_offset;
        self.resolveRelocInner(rel, subtractor, buffer, macho_file, &stream) catch |err| {
            switch (err) {
                error.RelaxFail => {
                    const target = switch (rel.tag) {
                        .@"extern" => rel.getTargetSymbol(self, macho_file).getName(macho_file),
                        .local => rel.getTargetAtom(self, macho_file).getName(macho_file),
                    };
                    try macho_file.reportParseError2(
                        file.getIndex(),
                        "{s}: 0x{x}: 0x{x}: failed to relax relocation: type {f}, target {s}",
                        .{
                            name,
                            self.getAddress(macho_file),
                            rel.offset,
                            rel.fmtPretty(macho_file.getTarget().cpu.arch),
                            target,
                        },
                    );
                    has_error = true;
                },
                error.RelaxFailUnexpectedInstruction => has_error = true,
                else => |e| return e,
            }
        };
    }

    if (has_error) return error.ResolveFailed;
}

const ResolveError = error{
    RelaxFail,
    RelaxFailUnexpectedInstruction,
    NoSpaceLeft,
    DivisionByZero,
    UnexpectedRemainder,
    Overflow,
    OutOfMemory,
    WriteFailed,
};

fn resolveRelocInner(
    self: Atom,
    rel: Relocation,
    subtractor: ?Relocation,
    code: []u8,
    macho_file: *MachO,
    writer: *Writer,
) ResolveError!void {
    const t = &macho_file.base.comp.root_mod.resolved_target.result;
    const cpu_arch = t.cpu.arch;
    const rel_offset = math.cast(usize, rel.offset - self.off) orelse return error.Overflow;
    const P = @as(i64, @intCast(self.getAddress(macho_file))) + @as(i64, @intCast(rel_offset));
    const A = rel.addend + rel.getRelocAddend(cpu_arch);
    const S: i64 = @intCast(rel.getTargetAddress(self, macho_file));
    const G: i64 = @intCast(rel.getGotTargetAddress(self, macho_file));
    const TLS = @as(i64, @intCast(macho_file.getTlsAddress()));
    const SUB = if (subtractor) |sub| @as(i64, @intCast(sub.getTargetAddress(self, macho_file))) else 0;

    const divExact = struct {
        fn divExact(atom: Atom, r: Relocation, num: u12, den: u12, ctx: *MachO) !u12 {
            return math.divExact(u12, num, den) catch {
                try ctx.reportParseError2(atom.getFile(ctx).getIndex(), "{s}: unexpected remainder when resolving {f} at offset 0x{x}", .{
                    atom.getName(ctx),
                    r.fmtPretty(ctx.getTarget().cpu.arch),
                    r.offset,
                });
                return error.UnexpectedRemainder;
            };
        }
    }.divExact;

    switch (rel.tag) {
        .local => relocs_log.debug(" {x}<+{d}>: {f}: [=> {x}] atom({d})", .{
            P,
            rel_offset,
            rel.fmtPretty(cpu_arch),
            S + A - SUB,
            rel.getTargetAtom(self, macho_file).atom_index,
        }),
        .@"extern" => relocs_log.debug(" {x}<+{d}>: {f}: [=> {x}] G({x}) ({s})", .{
            P,
            rel_offset,
            rel.fmtPretty(cpu_arch),
            S + A - SUB,
            G + A,
            rel.getTargetSymbol(self, macho_file).getName(macho_file),
        }),
    }

    switch (rel.type) {
        .subtractor => {},

        .unsigned => {
            assert(!rel.meta.pcrel);
            if (rel.meta.length == 3) {
                if (rel.tag == .@"extern") {
                    const sym = rel.getTargetSymbol(self, macho_file);
                    if (sym.isTlvInit(macho_file)) {
                        try writer.writeInt(u64, @intCast(S - TLS), .little);
                        return;
                    }
                    if (sym.flags.import) return;
                }
                try writer.writeInt(u64, @bitCast(S + A - SUB), .little);
            } else if (rel.meta.length == 2) {
                try writer.writeInt(u32, @bitCast(@as(i32, @truncate(S + A - SUB))), .little);
            } else unreachable;
        },

        .got => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(rel.meta.pcrel);
            try writer.writeInt(i32, @intCast(G + A - P), .little);
        },

        .branch => {
            assert(rel.meta.length == 2);
            assert(rel.meta.pcrel);
            assert(rel.tag == .@"extern");

            switch (cpu_arch) {
                .x86_64 => try writer.writeInt(i32, @intCast(S + A - P), .little),
                .aarch64 => {
                    const disp: i28 = math.cast(i28, S + A - P) orelse blk: {
                        const thunk = self.getThunk(macho_file);
                        const S_: i64 = @intCast(thunk.getTargetAddress(rel.getTargetSymbolRef(self, macho_file), macho_file));
                        break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
                    };
                    aarch64.writeBranchImm(disp, code[rel_offset..][0..4]);
                },
                else => unreachable,
            }
        },

        .got_load => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(rel.meta.pcrel);
            if (rel.getTargetSymbol(self, macho_file).getSectionFlags().has_got) {
                try writer.writeInt(i32, @intCast(G + A - P), .little);
            } else {
                try x86_64.relaxGotLoad(self, code[rel_offset - 3 ..], rel, macho_file);
                try writer.writeInt(i32, @intCast(S + A - P), .little);
            }
        },

        .tlv => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(rel.meta.pcrel);
            const sym = rel.getTargetSymbol(self, macho_file);
            if (sym.getSectionFlags().tlv_ptr) {
                const S_: i64 = @intCast(sym.getTlvPtrAddress(macho_file));
                try writer.writeInt(i32, @intCast(S_ + A - P), .little);
            } else {
                try x86_64.relaxTlv(code[rel_offset - 3 ..], t);
                try writer.writeInt(i32, @intCast(S + A - P), .little);
            }
        },

        .signed, .signed1, .signed2, .signed4 => {
            assert(rel.meta.length == 2);
            assert(rel.meta.pcrel);
            try writer.writeInt(i32, @intCast(S + A - P), .little);
        },

        .page,
        .got_load_page,
        .tlvp_page,
        => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(rel.meta.pcrel);
            const sym = rel.getTargetSymbol(self, macho_file);
            const source = math.cast(u64, P) orelse return error.Overflow;
            const target = target: {
                const target = switch (rel.type) {
                    .page => S + A,
                    .got_load_page => G + A,
                    .tlvp_page => if (sym.getSectionFlags().tlv_ptr) blk: {
                        const S_: i64 = @intCast(sym.getTlvPtrAddress(macho_file));
                        break :blk S_ + A;
                    } else S + A,
                    else => unreachable,
                };
                break :target math.cast(u64, target) orelse return error.Overflow;
            };
            aarch64.writeAdrInst(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target)), code[rel_offset..][0..aarch64.encoding.Instruction.size]);
        },

        .pageoff => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(!rel.meta.pcrel);
            const target = math.cast(u64, S + A) orelse return error.Overflow;
            const inst_code = code[rel_offset..][0..aarch64.encoding.Instruction.size];
            var inst: aarch64.encoding.Instruction = .read(inst_code);
            switch (inst.decode()) {
                else => unreachable,
                .data_processing_immediate => aarch64.writeAddImmInst(@truncate(target), inst_code),
                .load_store => |load_store| {
                    inst.load_store.register_unsigned_immediate.group.imm12 = switch (load_store.register_unsigned_immediate.decode()) {
                        .integer => |integer| try divExact(self, rel, @truncate(target), @as(u4, 1) << @intFromEnum(integer.group.size), macho_file),
                        .vector => |vector| try divExact(self, rel, @truncate(target), @as(u5, 1) << @intFromEnum(vector.group.opc1.decode(vector.group.size)), macho_file),
                    };
                    try writer.writeInt(u32, @bitCast(inst), .little);
                },
            }
        },

        .got_load_pageoff => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(!rel.meta.pcrel);
            const target = math.cast(u64, G + A) orelse return error.Overflow;
            aarch64.writeLoadStoreRegInst(try divExact(self, rel, @truncate(target), 8, macho_file), code[rel_offset..][0..4]);
        },

        .tlvp_pageoff => {
            assert(rel.tag == .@"extern");
            assert(rel.meta.length == 2);
            assert(!rel.meta.pcrel);

            const sym = rel.getTargetSymbol(self, macho_file);
            const target = target: {
                const target = if (sym.getSectionFlags().tlv_ptr) blk: {
                    const S_: i64 = @intCast(sym.getTlvPtrAddress(macho_file));
                    break :blk S_ + A;
                } else S + A;
                break :target math.cast(u64, target) orelse return error.Overflow;
            };

            const inst_code = code[rel_offset..][0..4];
            const rd, const rn = switch (aarch64.encoding.Instruction.read(inst_code).decode()) {
                else => unreachable,
                .data_processing_immediate => |decoded| .{
                    decoded.add_subtract_immediate.group.Rd.decode(.{ .sp = true }),
                    decoded.add_subtract_immediate.group.Rn.decode(.{ .sp = true }),
                },
                .load_store => |decoded| .{
                    decoded.register_unsigned_immediate.integer.group.Rt.decode(.{}),
                    decoded.register_unsigned_immediate.group.Rn.decode(.{ .sp = true }),
                },
            };

            try writer.writeInt(u32, @bitCast(@as(
                aarch64.encoding.Instruction,
                if (sym.getSectionFlags().tlv_ptr) .ldr(rd.x(), .{ .unsigned_offset = .{
                    .base = rn.x(),
                    .offset = try divExact(self, rel, @truncate(target), 8, macho_file) * 8,
                } }) else .add(rd.x(), rn.x(), .{ .immediate = @truncate(target) }),
            )), .little);
        },
    }
}

const x86_64 = struct {
    fn relaxGotLoad(self: Atom, code: []u8, rel: Relocation, macho_file: *MachO) ResolveError!void {
        dev.check(.x86_64_backend);
        const t = &macho_file.base.comp.root_mod.resolved_target.result;
        const diags = &macho_file.base.comp.link_diags;
        const old_inst = disassemble(code) orelse return error.RelaxFail;
        switch (old_inst.encoding.mnemonic) {
            .mov => {
                const inst = Instruction.new(old_inst.prefix, .lea, &old_inst.ops, t) catch return error.RelaxFail;
                relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
                encode(&.{inst}, code) catch return error.RelaxFail;
            },
            else => |x| {
                var err = try diags.addErrorWithNotes(2);
                try err.addMsg("{s}: 0x{x}: 0x{x}: failed to relax relocation of type {f}", .{
                    self.getName(macho_file),
                    self.getAddress(macho_file),
                    rel.offset,
                    rel.fmtPretty(.x86_64),
                });
                err.addNote("expected .mov instruction but found .{s}", .{@tagName(x)});
                err.addNote("while parsing {f}", .{self.getFile(macho_file).fmtPath()});
                return error.RelaxFailUnexpectedInstruction;
            },
        }
    }

    fn relaxTlv(code: []u8, t: *const std.Target) error{RelaxFail}!void {
        dev.check(.x86_64_backend);
        const old_inst = disassemble(code) orelse return error.RelaxFail;
        switch (old_inst.encoding.mnemonic) {
            .mov => {
                const inst = Instruction.new(old_inst.prefix, .lea, &old_inst.ops, t) catch return error.RelaxFail;
                relocs_log.debug(" relaxing {f} => {f}", .{ old_inst.encoding, inst.encoding });
                encode(&.{inst}, code) catch return error.RelaxFail;
            },
            else => return error.RelaxFail,
        }
    }

    fn disassemble(code: []const u8) ?Instruction {
        var disas = Disassembler.init(code);
        const inst = disas.next() catch return null;
        return inst;
    }

    fn encode(insts: []const Instruction, code: []u8) !void {
        var stream: Writer = .fixed(code);
        for (insts) |inst| try inst.encode(&stream, .{});
    }

    const bits = @import("../../codegen/x86_64/bits.zig");
    const encoder = @import("../../codegen/x86_64/encoder.zig");
    const Disassembler = @import("../../codegen/x86_64/Disassembler.zig");
    const Immediate = bits.Immediate;
    const Instruction = encoder.Instruction;
};

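/// Returns the number of `relocation_info` records that writeRelocs will emit for this
/// atom. On aarch64, `page`/`pageoff` relocations with a positive addend need an extra
/// ARM64_RELOC_ADDEND record; on x86_64 the count equals the number of relocations.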
pub fn calcNumRelocs(self: Atom, macho_file: *MachO) u32 {
    const relocs = self.getRelocs(macho_file);
    switch (macho_file.getTarget().cpu.arch) {
        .aarch64 => {
            var nreloc: u32 = 0;
            for (relocs) |rel| {
                nreloc += 1;
                switch (rel.type) {
                    .page, .pageoff => if (rel.addend > 0) {
                        nreloc += 1;
                    },
                    else => {},
                }
            }
            return nreloc;
        },
        .x86_64 => return @intCast(relocs.len),
        else => unreachable,
    }
}

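/// Writes `macho.relocation_info` records for this atom into `buffer` for relocatable
/// output, and writes any addends that belong in the instruction stream into `code`.
/// `buffer` is expected to hold exactly as many entries as calcNumRelocs reports.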
pub fn writeRelocs(self: Atom, macho_file: *MachO, code: []u8, buffer: []macho.relocation_info) error{ LinkFailure, OutOfMemory }!void {
    const tracy = trace(@src());
    defer tracy.end();

    relocs_log.debug("{x}: {s}", .{ self.getAddress(macho_file), self.getName(macho_file) });

    const cpu_arch = macho_file.getTarget().cpu.arch;
    const relocs = self.getRelocs(macho_file);

    var i: usize = 0;
    for (relocs) |rel| {
        defer i += 1;
        const rel_offset = try macho_file.cast(usize, rel.offset - self.off);
        const r_address: i32 = try macho_file.cast(i32, self.value + rel_offset);
        assert(r_address >= 0);
        const r_symbolnum = r_symbolnum: {
            const r_symbolnum: u32 = switch (rel.tag) {
                .local => rel.getTargetAtom(self, macho_file).out_n_sect + 1,
                .@"extern" => rel.getTargetSymbol(self, macho_file).getOutputSymtabIndex(macho_file).?,
            };
            break :r_symbolnum try macho_file.cast(u24, r_symbolnum);
        };
        const r_extern = rel.tag == .@"extern";
        var addend = rel.addend + rel.getRelocAddend(cpu_arch);
        if (rel.tag == .local) {
            const target: i64 = @intCast(rel.getTargetAddress(self, macho_file));
            addend += target;
        }

        switch (rel.tag) {
            .local => relocs_log.debug(" {f}: [{x} => {d}({s},{s})] + {x}", .{
                rel.fmtPretty(cpu_arch),
                r_address,
                r_symbolnum,
                macho_file.sections.items(.header)[r_symbolnum - 1].segName(),
                macho_file.sections.items(.header)[r_symbolnum - 1].sectName(),
                addend,
            }),
            .@"extern" => relocs_log.debug(" {f}: [{x} => {d}({s})] + {x}", .{
                rel.fmtPretty(cpu_arch),
                r_address,
                r_symbolnum,
                rel.getTargetSymbol(self, macho_file).getName(macho_file),
                addend,
            }),
        }

        switch (cpu_arch) {
            .aarch64 => {
                if (rel.type == .unsigned) switch (rel.meta.length) {
                    0, 1 => unreachable,
                    2 => mem.writeInt(i32, code[rel_offset..][0..4], @truncate(addend), .little),
                    3 => mem.writeInt(i64, code[rel_offset..][0..8], addend, .little),
                } else if (addend > 0) {
                    buffer[i] = .{
                        .r_address = r_address,
                        .r_symbolnum = @bitCast(try macho_file.cast(i24, addend)),
                        .r_pcrel = 0,
                        .r_length = 2,
                        .r_extern = 0,
                        .r_type = @intFromEnum(macho.reloc_type_arm64.ARM64_RELOC_ADDEND),
                    };
                    i += 1;
                }

                const r_type: macho.reloc_type_arm64 = switch (rel.type) {
                    .page => .ARM64_RELOC_PAGE21,
                    .pageoff => .ARM64_RELOC_PAGEOFF12,
                    .got_load_page => .ARM64_RELOC_GOT_LOAD_PAGE21,
                    .got_load_pageoff => .ARM64_RELOC_GOT_LOAD_PAGEOFF12,
                    .tlvp_page => .ARM64_RELOC_TLVP_LOAD_PAGE21,
                    .tlvp_pageoff => .ARM64_RELOC_TLVP_LOAD_PAGEOFF12,
                    .branch => .ARM64_RELOC_BRANCH26,
                    .got => .ARM64_RELOC_POINTER_TO_GOT,
                    .subtractor => .ARM64_RELOC_SUBTRACTOR,
                    .unsigned => .ARM64_RELOC_UNSIGNED,

                    .signed,
                    .signed1,
                    .signed2,
                    .signed4,
                    .got_load,
                    .tlv,
                    => unreachable,
                };
                buffer[i] = .{
                    .r_address = r_address,
                    .r_symbolnum = r_symbolnum,
                    .r_pcrel = @intFromBool(rel.meta.pcrel),
                    .r_extern = @intFromBool(r_extern),
                    .r_length = rel.meta.length,
                    .r_type = @intFromEnum(r_type),
                };
            },
            .x86_64 => {
                if (rel.meta.pcrel) {
                    if (rel.tag == .local) {
                        addend -= @as(i64, @intCast(self.getAddress(macho_file) + rel_offset));
                    } else {
                        addend += 4;
                    }
                }
                switch (rel.meta.length) {
                    0, 1 => unreachable,
                    2 => mem.writeInt(i32, code[rel_offset..][0..4], @truncate(addend), .little),
                    3 => mem.writeInt(i64, code[rel_offset..][0..8], addend, .little),
                }

                const r_type: macho.reloc_type_x86_64 = switch (rel.type) {
                    .signed => .X86_64_RELOC_SIGNED,
                    .signed1 => .X86_64_RELOC_SIGNED_1,
                    .signed2 => .X86_64_RELOC_SIGNED_2,
                    .signed4 => .X86_64_RELOC_SIGNED_4,
                    .got_load => .X86_64_RELOC_GOT_LOAD,
                    .tlv => .X86_64_RELOC_TLV,
                    .branch => .X86_64_RELOC_BRANCH,
                    .got => .X86_64_RELOC_GOT,
                    .subtractor => .X86_64_RELOC_SUBTRACTOR,
                    .unsigned => .X86_64_RELOC_UNSIGNED,

                    .page,
                    .pageoff,
                    .got_load_page,
                    .got_load_pageoff,
                    .tlvp_page,
                    .tlvp_pageoff,
                    => unreachable,
                };
                buffer[i] = .{
                    .r_address = r_address,
                    .r_symbolnum = r_symbolnum,
                    .r_pcrel = @intFromBool(rel.meta.pcrel),
                    .r_extern = @intFromBool(r_extern),
                    .r_length = rel.meta.length,
                    .r_type = @intFromEnum(r_type),
                };
            },
            else => unreachable,
        }
    }

    assert(i == buffer.len);
}

pub fn fmt(atom: Atom, macho_file: *MachO) std.fmt.Alt(Format, Format.print) {
    return .{ .data = .{
        .atom = atom,
        .macho_file = macho_file,
    } };
}

const Format = struct {
    atom: Atom,
    macho_file: *MachO,

    fn print(f: Format, w: *Writer) Writer.Error!void {
        const atom = f.atom;
        const macho_file = f.macho_file;
        const file = atom.getFile(macho_file);
        try w.print("atom({d}) : {s} : @{x} : sect({d}) : align({x}) : size({x}) : nreloc({d}) : thunk({d})", .{
            atom.atom_index, atom.getName(macho_file), atom.getAddress(macho_file),
            atom.out_n_sect, atom.alignment, atom.size,
            atom.getRelocs(macho_file).len, atom.getExtra(macho_file).thunk,
        });
        if (!atom.isAlive()) try w.writeAll(" : [*]");
        if (atom.getUnwindRecords(macho_file).len > 0) {
            try w.writeAll(" : unwind{ ");
            const extra = atom.getExtra(macho_file);
            for (atom.getUnwindRecords(macho_file), extra.unwind_index..) |index, i| {
                const rec = file.object.getUnwindRecord(index);
                try w.print("{d}", .{index});
                if (!rec.alive) try w.writeAll("([*])");
                if (i < extra.unwind_index + extra.unwind_count - 1) try w.writeAll(", ");
            }
            try w.writeAll(" }");
        }
    }
};

pub const Index = u32;

pub const Extra = struct {
    /// Index of the range extension thunk of this atom.
    thunk: u32 = 0,

    /// Start index of relocations belonging to this atom.
    rel_index: u32 = 0,

    /// Count of relocations belonging to this atom.
    rel_count: u32 = 0,

    /// Start index of relocations being written out to file for this atom.
    rel_out_index: u32 = 0,

    /// Count of relocations written out to file for this atom.
    rel_out_count: u32 = 0,

    /// Start index of unwind records belonging to this atom.
    unwind_index: u32 = 0,

    /// Count of unwind records belonging to this atom.
    unwind_count: u32 = 0,

    /// Index into LiteralPool entry for this atom.
    literal_pool_index: u32 = 0,

    /// Index into the File's symbol table for local symbol representing this literal atom.
    literal_symbol_index: u32 = 0,
};

pub const Alignment = @import("../../InternPool.zig").Alignment;

const std = @import("std");
const assert = std.debug.assert;
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const log = std.log.scoped(.link);
const relocs_log = std.log.scoped(.link_relocs);
const Writer = std.Io.Writer;
const Allocator = mem.Allocator;
const AtomicBool = std.atomic.Value(bool);

const aarch64 = @import("../aarch64.zig");
const trace = @import("../../tracy.zig").trace;
const Atom = @This();
const File = @import("file.zig").File;
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
const Relocation = @import("Relocation.zig");
const Symbol = @import("Symbol.zig");
const Thunk = @import("Thunk.zig");
const UnwindInfo = @import("UnwindInfo.zig");
const dev = @import("../../dev.zig");