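/// Produces a single relocatable MachO object file from all link inputs (the equivalent of
/// `-r` mode). When there is no Zig object and only one input, that input is copied through
/// verbatim instead of being re-linked.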
pub fn flushObject(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
    const gpa = macho_file.base.comp.gpa;
    const diags = &macho_file.base.comp.link_diags;

    // TODO: "positional arguments" is a CLI concept, not a linker concept. Delete this unnecessary array list.
    var positionals = std.array_list.Managed(link.Input).init(gpa);
    defer positionals.deinit();
    try positionals.ensureUnusedCapacity(comp.link_inputs.len);
    positionals.appendSliceAssumeCapacity(comp.link_inputs);

    for (comp.c_object_table.keys()) |key| {
        try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
    }

    if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));

    if (macho_file.getZigObject() == null and positionals.items.len == 1) {
        // Instead of invoking a full-blown `-r` mode on the input, which would sadly strip
        // all debug info segments/sections (apparently by design on Apple's part), we copy
        // the *only* input file over.
        const path = positionals.items[0].path().?;
        const in_file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err|
            return diags.fail("failed to open {f}: {s}", .{ path, @errorName(err) });
        const stat = in_file.stat() catch |err|
            return diags.fail("failed to stat {f}: {s}", .{ path, @errorName(err) });
        const amt = in_file.copyRangeAll(0, macho_file.base.file.?, 0, stat.size) catch |err|
            return diags.fail("failed to copy range of file {f}: {s}", .{ path, @errorName(err) });
        if (amt != stat.size)
            return diags.fail("unexpected short write in copy range of file {f}", .{path});
        return;
    }

    for (positionals.items) |link_input| {
        macho_file.classifyInputFile(link_input) catch |err|
            diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
    }

    if (diags.hasErrors()) return error.LinkFailure;

    try macho_file.parseInputFiles();

    if (diags.hasErrors()) return error.LinkFailure;

    try macho_file.resolveSymbols();
    macho_file.dedupLiterals() catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.LinkFailure => return error.LinkFailure,
        else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}),
    };
    markExports(macho_file);
    claimUnresolved(macho_file);
    try initOutputSections(macho_file);
    try macho_file.sortSections();
    try macho_file.addAtomsToSections();
    try calcSectionSizes(macho_file);

    try createSegment(macho_file);
    allocateSections(macho_file) catch |err| switch (err) {
        error.LinkFailure => return error.LinkFailure,
        else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}),
    };
    allocateSegment(macho_file);

    if (build_options.enable_logging) {
        state_log.debug("{f}", .{macho_file.dumpState()});
    }

    try writeSections(macho_file);
    sortRelocs(macho_file);
    try writeSectionsToFile(macho_file);

    // In order to please Apple ld (and possibly other MachO linkers in the wild),
    // we will now sanitize segment names of Zig-specific segments.
    sanitizeZigSections(macho_file);

    const ncmds, const sizeofcmds = try writeLoadCommands(macho_file);
    try writeHeader(macho_file, ncmds, sizeofcmds);
}

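/// Produces a static archive: the Zig object (if any) is first flushed as a relocatable
/// object, then all contributing objects are written into an ar archive preceded by an
/// archive symbol table.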
pub fn flushStaticLib(macho_file: *MachO, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
    const gpa = comp.gpa;
    const diags = &macho_file.base.comp.link_diags;

    var positionals = std.array_list.Managed(link.Input).init(gpa);
    defer positionals.deinit();

    try positionals.ensureUnusedCapacity(comp.link_inputs.len);
    positionals.appendSliceAssumeCapacity(comp.link_inputs);

    for (comp.c_object_table.keys()) |key| {
        try positionals.append(try link.openObjectInput(diags, key.status.success.object_path));
    }

    if (module_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));

    if (comp.compiler_rt_strat == .obj) {
        try positionals.append(try link.openObjectInput(diags, comp.compiler_rt_obj.?.full_object_path));
    }

    if (comp.ubsan_rt_strat == .obj) {
        try positionals.append(try link.openObjectInput(diags, comp.ubsan_rt_obj.?.full_object_path));
    }

    for (positionals.items) |link_input| {
        macho_file.classifyInputFile(link_input) catch |err|
            diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
    }

    if (diags.hasErrors()) return error.LinkFailure;

    try parseInputFilesAr(macho_file);

    if (diags.hasErrors()) return error.LinkFailure;

    // First, we flush the relocatable object file generated by our backends.
    if (macho_file.getZigObject()) |zo| {
        try zo.resolveSymbols(macho_file);
        zo.asFile().markExportsRelocatable(macho_file);
        zo.asFile().claimUnresolvedRelocatable(macho_file);
        try macho_file.sortSections();
        try macho_file.addAtomsToSections();
        try calcSectionSizes(macho_file);
        try createSegment(macho_file);
        allocateSections(macho_file) catch |err|
            return diags.fail("failed to allocate sections: {s}", .{@errorName(err)});
        allocateSegment(macho_file);

        if (build_options.enable_logging) {
            state_log.debug("{f}", .{macho_file.dumpState()});
        }

        try writeSections(macho_file);
        sortRelocs(macho_file);
        try writeSectionsToFile(macho_file);

        // In order to please Apple ld (and possibly other MachO linkers in the wild),
        // we will now sanitize segment names of Zig-specific segments.
        sanitizeZigSections(macho_file);

        const ncmds, const sizeofcmds = try writeLoadCommands(macho_file);
        try writeHeader(macho_file, ncmds, sizeofcmds);

        try zo.readFileContents(macho_file);
    }

    var files = std.array_list.Managed(File.Index).init(gpa);
    defer files.deinit();
    try files.ensureTotalCapacityPrecise(macho_file.objects.items.len + 1);
    if (macho_file.getZigObject()) |zo| files.appendAssumeCapacity(zo.index);
    for (macho_file.objects.items) |index| files.appendAssumeCapacity(index);

    const format: Archive.Format = .p32;
    const ptr_width = Archive.ptrWidth(format);

    // Update ar symtab from parsed objects
    var ar_symtab: Archive.ArSymtab = .{};
    defer ar_symtab.deinit(gpa);

    for (files.items) |index| {
        try macho_file.getFile(index).?.updateArSymtab(&ar_symtab, macho_file);
    }

    ar_symtab.sort();

    // Update sizes of contributing objects
    for (files.items) |index| {
        macho_file.getFile(index).?.updateArSize(macho_file) catch |err|
            return diags.fail("failed to update ar size: {s}", .{@errorName(err)});
    }

    // Update file offsets of contributing objects
    const total_size: usize = blk: {
        var pos: usize = Archive.SARMAG;
        pos += @sizeOf(Archive.ar_hdr);
        pos += mem.alignForward(usize, Archive.SYMDEF.len + 1, ptr_width);
        pos += ar_symtab.size(format);

        for (files.items) |index| {
            const file = macho_file.getFile(index).?;
            switch (file) {
                .zig_object => |zo| {
                    const state = &zo.output_ar_state;
                    pos = mem.alignForward(usize, pos, 2);
                    state.file_off = pos;
                    pos += @sizeOf(Archive.ar_hdr);
                    pos += mem.alignForward(usize, zo.basename.len + 1, ptr_width);
                    pos += try macho_file.cast(usize, state.size);
                },
                .object => |o| {
                    const state = &o.output_ar_state;
                    pos = mem.alignForward(usize, pos, 2);
                    state.file_off = pos;
                    pos += @sizeOf(Archive.ar_hdr);
                    pos += mem.alignForward(usize, std.fs.path.basename(o.path).len + 1, ptr_width);
                    pos += try macho_file.cast(usize, state.size);
                },
                else => unreachable,
            }
        }

        break :blk pos;
    };

    if (build_options.enable_logging) {
        state_log.debug("ar_symtab\n{f}\n", .{ar_symtab.fmt(macho_file)});
    }

    const buffer = try gpa.alloc(u8, total_size);
    defer gpa.free(buffer);
    var writer: Writer = .fixed(buffer);

    // Write magic
    writer.writeAll(Archive.ARMAG) catch unreachable;

    // Write symtab
    ar_symtab.write(format, macho_file, &writer) catch |err|
        return diags.fail("failed to write archive symbol table: {t}", .{err});

    // Write object files
    for (files.items) |index| {
        const aligned = mem.alignForward(usize, writer.end, 2);
        const padding = aligned - writer.end;
        if (padding > 0) {
            writer.splatByteAll(0, padding) catch unreachable;
        }
        macho_file.getFile(index).?.writeAr(format, macho_file, &writer) catch |err|
            return diags.fail("failed to write archive: {t}", .{err});
    }

    assert(writer.end == total_size);

    try macho_file.setEndPos(total_size);
    try macho_file.pwriteAll(writer.buffered(), 0);

    if (diags.hasErrors()) return error.LinkFailure;
}

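/// Parses every classified input object for inclusion in the archive; parse failures are
/// reported as diagnostics rather than aborting immediately.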
fn parseInputFilesAr(macho_file: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    for (macho_file.objects.items) |index| {
        macho_file.getFile(index).?.parseAr(macho_file) catch |err| switch (err) {
            error.InvalidMachineType => {}, // already reported
            else => |e| try macho_file.reportParseError2(index, "unexpected error: parsing input file failed with error {s}", .{@errorName(e)}),
        };
    }
}

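/// Marks exported symbols in the Zig object and all input objects for relocatable output.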
fn markExports(macho_file: *MachO) void {
    if (macho_file.getZigObject()) |zo| {
        zo.asFile().markExportsRelocatable(macho_file);
    }
    for (macho_file.objects.items) |index| {
        macho_file.getFile(index).?.markExportsRelocatable(macho_file);
    }
}

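/// Claims unresolved symbols in the Zig object and all input objects for relocatable output.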
pub fn claimUnresolved(macho_file: *MachO) void {
    if (macho_file.getZigObject()) |zo| {
        zo.asFile().claimUnresolvedRelocatable(macho_file);
    }
    for (macho_file.objects.items) |index| {
        macho_file.getFile(index).?.claimUnresolvedRelocatable(macho_file);
    }
}

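/// Assigns every live atom to an output section, and creates the `__LD,__compact_unwind`
/// and `__TEXT,__eh_frame` sections if any input object needs them.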
fn initOutputSections(macho_file: *MachO) !void {
    for (macho_file.objects.items) |index| {
        const file = macho_file.getFile(index).?;
        for (file.getAtoms()) |atom_index| {
            const atom = file.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            atom.out_n_sect = try Atom.initOutputSection(atom.getInputSection(macho_file), macho_file);
        }
    }

    const needs_unwind_info = for (macho_file.objects.items) |index| {
        if (macho_file.getFile(index).?.object.hasUnwindRecords()) break true;
    } else false;
    if (needs_unwind_info) {
        macho_file.unwind_info_sect_index = try macho_file.addSection("__LD", "__compact_unwind", .{
            .flags = macho.S_ATTR_DEBUG,
        });
    }

    const needs_eh_frame = for (macho_file.objects.items) |index| {
        if (macho_file.getFile(index).?.object.hasEhFrameRecords()) break true;
    } else false;
    if (needs_eh_frame) {
        assert(needs_unwind_info);
        macho_file.eh_frame_sect_index = try macho_file.addSection("__TEXT", "__eh_frame", .{});
    }
}

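/// Computes sizes, alignments, and relocation counts for all output sections, including
/// `__eh_frame`, compact unwind, the symbol table, and the data-in-code LINKEDIT payload.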
fn calcSectionSizes(macho_file: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &macho_file.base.comp.link_diags;

    if (macho_file.getZigObject()) |zo| {
        // TODO this will create a race as we need to track merging of debug sections which we currently don't
        zo.calcNumRelocs(macho_file);
    }

    {
        for (macho_file.sections.items(.atoms), 0..) |atoms, i| {
            if (atoms.items.len == 0) continue;
            calcSectionSizeWorker(macho_file, @as(u8, @intCast(i)));
        }

        if (macho_file.eh_frame_sect_index) |_| {
            calcEhFrameSizeWorker(macho_file);
        }

        if (macho_file.unwind_info_sect_index) |_| {
            for (macho_file.objects.items) |index| {
                Object.calcCompactUnwindSizeRelocatable(
                    macho_file.getFile(index).?.object,
                    macho_file,
                );
            }
        }

        for (macho_file.objects.items) |index| {
            File.calcSymtabSize(macho_file.getFile(index).?, macho_file);
        }
        if (macho_file.getZigObject()) |zo| {
            File.calcSymtabSize(zo.asFile(), macho_file);
        }

        MachO.updateLinkeditSizeWorker(macho_file, .data_in_code);
    }

    if (macho_file.unwind_info_sect_index) |_| {
        calcCompactUnwindSize(macho_file);
    }
    try calcSymtabSize(macho_file);

    if (diags.hasErrors()) return error.LinkFailure;
}

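/// Lays out the atoms of a single section back to back (respecting each atom's alignment)
/// and accumulates the section's size, alignment, and relocation count.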
fn calcSectionSizeWorker(macho_file: *MachO, sect_id: u8) void {
    const tracy = trace(@src());
    defer tracy.end();

    const slice = macho_file.sections.slice();
    const header = &slice.items(.header)[sect_id];
    const atoms = slice.items(.atoms)[sect_id].items;
    for (atoms) |ref| {
        const atom = ref.getAtom(macho_file).?;
        const atom_alignment = atom.alignment.toByteUnits() orelse 1;
        const offset = mem.alignForward(u64, header.size, atom_alignment);
        const padding = offset - header.size;
        atom.value = offset;
        header.size += padding + atom.size;
        header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
        const nreloc = atom.calcNumRelocs(macho_file);
        atom.addExtra(.{ .rel_out_index = header.nreloc, .rel_out_count = nreloc }, macho_file);
        header.nreloc += nreloc;
    }
}

fn calcEhFrameSizeWorker(macho_file: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &macho_file.base.comp.link_diags;

    const doWork = struct {
        fn doWork(mfile: *MachO, header: *macho.section_64) !void {
            header.size = try eh_frame.calcSize(mfile);
            header.@"align" = 3;
            header.nreloc = eh_frame.calcNumRelocs(mfile);
        }
    }.doWork;

    const header = &macho_file.sections.items(.header)[macho_file.eh_frame_sect_index.?];
    doWork(macho_file, header) catch |err|
        diags.addError("failed to calculate size of section '__TEXT,__eh_frame': {s}", .{@errorName(err)});
}

fn calcCompactUnwindSize(macho_file: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    var nrec: u32 = 0;
    var nreloc: u32 = 0;

    for (macho_file.objects.items) |index| {
        const ctx = &macho_file.getFile(index).?.object.compact_unwind_ctx;
        ctx.rec_index = nrec;
        ctx.reloc_index = nreloc;
        nrec += ctx.rec_count;
        nreloc += ctx.reloc_count;
    }

    const sect = &macho_file.sections.items(.header)[macho_file.unwind_info_sect_index.?];
    sect.size = nrec * @sizeOf(macho.compact_unwind_entry);
    sect.nreloc = nreloc;
    sect.@"align" = 3;
}

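/// Assigns per-file offsets into the output symbol and string tables and fills in the
/// totals recorded in the SYMTAB and DYSYMTAB load commands.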
fn calcSymtabSize(macho_file: *MachO) error{OutOfMemory}!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = macho_file.base.comp.gpa;

    var nlocals: u32 = 0;
    var nstabs: u32 = 0;
    var nexports: u32 = 0;
    var nimports: u32 = 0;
    var strsize: u32 = 1;

    var objects = try std.array_list.Managed(File.Index).initCapacity(gpa, macho_file.objects.items.len + 1);
    defer objects.deinit();
    if (macho_file.getZigObject()) |zo| objects.appendAssumeCapacity(zo.index);
    objects.appendSliceAssumeCapacity(macho_file.objects.items);

    for (objects.items) |index| {
        const ctx = switch (macho_file.getFile(index).?) {
            inline else => |x| &x.output_symtab_ctx,
        };
        ctx.ilocal = nlocals;
        ctx.istab = nstabs;
        ctx.iexport = nexports;
        ctx.iimport = nimports;
        ctx.stroff = strsize;
        nlocals += ctx.nlocals;
        nstabs += ctx.nstabs;
        nexports += ctx.nexports;
        nimports += ctx.nimports;
        strsize += ctx.strsize;
    }

    for (objects.items) |index| {
        const ctx = switch (macho_file.getFile(index).?) {
            inline else => |x| &x.output_symtab_ctx,
        };
        ctx.istab += nlocals;
        ctx.iexport += nlocals + nstabs;
        ctx.iimport += nlocals + nstabs + nexports;
    }

    {
        const cmd = &macho_file.symtab_cmd;
        cmd.nsyms = nlocals + nstabs + nexports + nimports;
        cmd.strsize = strsize;
    }

    {
        const cmd = &macho_file.dysymtab_cmd;
        cmd.ilocalsym = 0;
        cmd.nlocalsym = nlocals + nstabs;
        cmd.iextdefsym = nlocals + nstabs;
        cmd.nextdefsym = nexports;
        cmd.iundefsym = nlocals + nstabs + nexports;
        cmd.nundefsym = nimports;
    }
}

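/// Allocates file offsets and virtual addresses for all sections, followed by file space
/// for relocations, the data-in-code entries, and the symbol/string tables.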
fn allocateSections(macho_file: *MachO) !void {
    const slice = macho_file.sections.slice();
    for (slice.items(.header)) |*header| {
        const needed_size = header.size;
        header.size = 0;
        const alignment = try macho_file.alignPow(header.@"align");
        if (!header.isZerofill()) {
            if (needed_size > macho_file.allocatedSize(header.offset)) {
                header.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(needed_size, alignment));
            }
        }
        if (needed_size > macho_file.allocatedSizeVirtual(header.addr)) {
            header.addr = macho_file.findFreeSpaceVirtual(needed_size, alignment);
        }
        header.size = needed_size;
    }

    var fileoff: u32 = 0;
    for (slice.items(.header)) |header| {
        fileoff = @max(fileoff, header.offset + @as(u32, @intCast(header.size)));
    }

    for (slice.items(.header)) |*header| {
        if (header.nreloc == 0) continue;
        header.reloff = mem.alignForward(u32, fileoff, @alignOf(macho.relocation_info));
        fileoff = header.reloff + header.nreloc * @sizeOf(macho.relocation_info);
    }

    // In -r mode, there is no LINKEDIT segment and so we allocate required LINKEDIT commands
    // as if they were detached or part of the single segment.

    // DATA_IN_CODE
    {
        const cmd = &macho_file.data_in_code_cmd;
        cmd.dataoff = fileoff;
        fileoff += cmd.datasize;
        fileoff = mem.alignForward(u32, fileoff, @alignOf(u64));
    }

    // SYMTAB
    {
        const cmd = &macho_file.symtab_cmd;
        cmd.symoff = fileoff;
        fileoff += cmd.nsyms * @sizeOf(macho.nlist_64);
        fileoff = mem.alignForward(u32, fileoff, @alignOf(u32));
        cmd.stroff = fileoff;
    }
}

/// Renames the segment names of Zig sections to standard MachO segment names such as
/// `__TEXT`, `__DATA_CONST` and `__DATA`.
/// TODO: I think I may be able to get rid of this if I rework the section/segment
/// allocation mechanism to not rely so much on having `_ZIG` sections always
/// pushed to the back. For instance, this is not a problem in the ELF linker.
/// Then, we can create sections with the correct names from the start in `MachO.initMetadata`.
fn sanitizeZigSections(macho_file: *MachO) void {
    if (macho_file.zig_text_sect_index) |index| {
        const header = &macho_file.sections.items(.header)[index];
        header.segname = MachO.makeStaticString("__TEXT");
    }
    if (macho_file.zig_const_sect_index) |index| {
        const header = &macho_file.sections.items(.header)[index];
        header.segname = MachO.makeStaticString("__DATA_CONST");
    }
    if (macho_file.zig_data_sect_index) |index| {
        const header = &macho_file.sections.items(.header)[index];
        header.segname = MachO.makeStaticString("__DATA");
    }
    if (macho_file.zig_bss_sect_index) |index| {
        const header = &macho_file.sections.items(.header)[index];
        header.segname = MachO.makeStaticString("__DATA");
    }
}

fn createSegment(macho_file: *MachO) !void {
    const gpa = macho_file.base.comp.gpa;

    // For relocatable, we only ever need a single segment so create it now.
    const prot: macho.vm_prot_t = macho.PROT.READ | macho.PROT.WRITE | macho.PROT.EXEC;
    try macho_file.segments.append(gpa, .{
        .cmdsize = @sizeOf(macho.segment_command_64),
        .segname = MachO.makeStaticString(""),
        .maxprot = prot,
        .initprot = prot,
    });
    const seg = &macho_file.segments.items[0];
    seg.nsects = @intCast(macho_file.sections.items(.header).len);
    seg.cmdsize += seg.nsects * @sizeOf(macho.section_64);
}

fn allocateSegment(macho_file: *MachO) void {
    // Allocate the single segment.
    const seg = &macho_file.segments.items[0];
    var vmaddr: u64 = 0;
    var fileoff: u64 = load_commands.calcLoadCommandsSizeObject(macho_file) + @sizeOf(macho.mach_header_64);
    seg.vmaddr = vmaddr;
    seg.fileoff = fileoff;

    for (macho_file.sections.items(.header)) |header| {
        vmaddr = @max(vmaddr, header.addr + header.size);
        if (!header.isZerofill()) {
            fileoff = @max(fileoff, header.offset + header.size);
        }
    }

    seg.vmsize = vmaddr - seg.vmaddr;
    seg.filesize = fileoff - seg.fileoff;
}

// We need to sort relocations in descending order to be compatible with Apple's linker.
fn sortReloc(ctx: void, lhs: macho.relocation_info, rhs: macho.relocation_info) bool {
    _ = ctx;
    return lhs.r_address > rhs.r_address;
}

fn sortRelocs(macho_file: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    for (macho_file.sections.items(.relocs)) |*relocs| {
        mem.sort(macho.relocation_info, relocs.items, {}, sortReloc);
    }
}

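/// Fills the in-memory section buffers, relocation lists, and symbol/string tables from
/// all input files (including the Zig object, eh_frame, and compact unwind data).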
fn writeSections(macho_file: *MachO) link.File.FlushError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = macho_file.base.comp.gpa;
    const diags = &macho_file.base.comp.link_diags;
    const cpu_arch = macho_file.getTarget().cpu.arch;
    const slice = macho_file.sections.slice();
    for (slice.items(.header), slice.items(.out), slice.items(.relocs), 0..) |header, *out, *relocs, n_sect| {
        if (header.isZerofill()) continue;
        if (!macho_file.isZigSection(@intCast(n_sect))) { // TODO this is wrong; what about debug sections?
            const size = try macho_file.cast(usize, header.size);
            try out.resize(gpa, size);
            const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0;
            @memset(out.items, padding_byte);
        }
        try relocs.resize(gpa, header.nreloc);
    }

    const cmd = macho_file.symtab_cmd;
    try macho_file.symtab.resize(gpa, cmd.nsyms);
    try macho_file.strtab.resize(gpa, cmd.strsize);
    macho_file.strtab.items[0] = 0;

    {
        for (macho_file.objects.items) |index| {
            writeAtomsWorker(macho_file, macho_file.getFile(index).?);
            File.writeSymtab(macho_file.getFile(index).?, macho_file, macho_file);
        }

        if (macho_file.getZigObject()) |zo| {
            writeAtomsWorker(macho_file, zo.asFile());
            File.writeSymtab(zo.asFile(), macho_file, macho_file);
        }

        if (macho_file.eh_frame_sect_index) |_| {
            writeEhFrameWorker(macho_file);
        }

        if (macho_file.unwind_info_sect_index) |_| {
            for (macho_file.objects.items) |index| {
                writeCompactUnwindWorker(macho_file, macho_file.getFile(index).?.object);
            }
        }
    }

    if (diags.hasErrors()) return error.LinkFailure;

    if (macho_file.getZigObject()) |zo| {
        try zo.writeRelocs(macho_file);
    }
}

fn writeAtomsWorker(macho_file: *MachO, file: File) void {
    const tracy = trace(@src());
    defer tracy.end();
    file.writeAtomsRelocatable(macho_file) catch |err| {
        macho_file.reportParseError2(file.getIndex(), "failed to write atoms: {s}", .{
            @errorName(err),
        }) catch {};
    };
}

fn writeEhFrameWorker(macho_file: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &macho_file.base.comp.link_diags;
    const sect_index = macho_file.eh_frame_sect_index.?;
    const buffer = macho_file.sections.items(.out)[sect_index];
    const relocs = macho_file.sections.items(.relocs)[sect_index];
    eh_frame.writeRelocs(macho_file, buffer.items, relocs.items) catch |err|
        diags.addError("failed to write '__TEXT,__eh_frame' section: {s}", .{@errorName(err)});
}

fn writeCompactUnwindWorker(macho_file: *MachO, object: *Object) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &macho_file.base.comp.link_diags;
    object.writeCompactUnwindRelocatable(macho_file) catch |err|
        diags.addError("failed to write '__LD,__compact_unwind' section: {s}", .{@errorName(err)});
}

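/// Writes the populated section buffers, relocations, data-in-code entries, and
/// symbol/string tables to the output file.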
fn writeSectionsToFile(macho_file: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const slice = macho_file.sections.slice();
    for (slice.items(.header), slice.items(.out), slice.items(.relocs)) |header, out, relocs| {
        try macho_file.pwriteAll(out.items, header.offset);
        try macho_file.pwriteAll(@ptrCast(relocs.items), header.reloff);
    }

    try macho_file.writeDataInCode();
    try macho_file.pwriteAll(@ptrCast(macho_file.symtab.items), macho_file.symtab_cmd.symoff);
    try macho_file.pwriteAll(macho_file.strtab.items, macho_file.symtab_cmd.stroff);
}

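/// Writes all load commands (segment/sections, DATA_IN_CODE, SYMTAB, DYSYMTAB, and the
/// build/version command) directly after the MachO header, returning the command count
/// and total size in bytes.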
fn writeLoadCommands(macho_file: *MachO) error{ LinkFailure, OutOfMemory }!struct { usize, usize } {
    const gpa = macho_file.base.comp.gpa;
    const needed_size = load_commands.calcLoadCommandsSizeObject(macho_file);
    const buffer = try gpa.alloc(u8, needed_size);
    defer gpa.free(buffer);

    var writer: Writer = .fixed(buffer);

    var ncmds: usize = 0;

    // Segment and section load commands
    {
        assert(macho_file.segments.items.len == 1);
        const seg = macho_file.segments.items[0];
        writer.writeStruct(seg, .little) catch |err| switch (err) {
            error.WriteFailed => unreachable,
        };
        for (macho_file.sections.items(.header)) |header| {
            writer.writeStruct(header, .little) catch |err| switch (err) {
                error.WriteFailed => unreachable,
            };
        }
        ncmds += 1;
    }

    writer.writeStruct(macho_file.data_in_code_cmd, .little) catch |err| switch (err) {
        error.WriteFailed => unreachable,
    };
    ncmds += 1;
    writer.writeStruct(macho_file.symtab_cmd, .little) catch |err| switch (err) {
        error.WriteFailed => unreachable,
    };
    ncmds += 1;
    writer.writeStruct(macho_file.dysymtab_cmd, .little) catch |err| switch (err) {
        error.WriteFailed => unreachable,
    };
    ncmds += 1;

    if (macho_file.platform.isBuildVersionCompatible()) {
        load_commands.writeBuildVersionLC(macho_file.platform, macho_file.sdk_version, &writer) catch |err| switch (err) {
            error.WriteFailed => unreachable,
        };
        ncmds += 1;
    } else {
        load_commands.writeVersionMinLC(macho_file.platform, macho_file.sdk_version, &writer) catch |err| switch (err) {
            error.WriteFailed => unreachable,
        };
        ncmds += 1;
    }

    assert(writer.end == needed_size);

    try macho_file.pwriteAll(buffer, @sizeOf(macho.mach_header_64));

    return .{ ncmds, buffer.len };
}

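/// Writes the `MH_OBJECT` MachO header, setting the CPU type/subtype from the target and
/// `MH_SUBSECTIONS_VIA_SYMBOLS` when any input object uses subsections.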
fn writeHeader(macho_file: *MachO, ncmds: usize, sizeofcmds: usize) !void {
    var header: macho.mach_header_64 = .{};
    header.filetype = macho.MH_OBJECT;

    const subsections_via_symbols = for (macho_file.objects.items) |index| {
        const object = macho_file.getFile(index).?.object;
        if (object.hasSubsections()) break true;
    } else false;
    if (subsections_via_symbols) {
        header.flags |= macho.MH_SUBSECTIONS_VIA_SYMBOLS;
    }

    switch (macho_file.getTarget().cpu.arch) {
        .aarch64 => {
            header.cputype = macho.CPU_TYPE_ARM64;
            header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL;
        },
        .x86_64 => {
            header.cputype = macho.CPU_TYPE_X86_64;
            header.cpusubtype = macho.CPU_SUBTYPE_X86_64_ALL;
        },
        else => {},
    }

    header.ncmds = @intCast(ncmds);
    header.sizeofcmds = @intCast(sizeofcmds);

    try macho_file.pwriteAll(mem.asBytes(&header), 0);
}

const std = @import("std");
const Path = std.Build.Cache.Path;
const WaitGroup = std.Thread.WaitGroup;
const assert = std.debug.assert;
const log = std.log.scoped(.link);
const macho = std.macho;
const math = std.math;
const mem = std.mem;
const state_log = std.log.scoped(.link_state);
const Writer = std.Io.Writer;

const Archive = @import("Archive.zig");
const Atom = @import("Atom.zig");
const Compilation = @import("../../Compilation.zig");
const File = @import("file.zig").File;
const MachO = @import("../MachO.zig");
const Object = @import("Object.zig");
const Symbol = @import("Symbol.zig");
const build_options = @import("build_options");
const eh_frame = @import("eh_frame.zig");
const fat = @import("fat.zig");
const link = @import("../../link.zig");
const load_commands = @import("load_commands.zig");
const trace = @import("../../tracy.zig").trace;