master
1pub const Atom = @import("MachO/Atom.zig");
2pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
3pub const Relocation = @import("MachO/Relocation.zig");
4
base: link.File,

/// Runtime search paths (-rpath) to be encoded into the output image.
rpath_list: []const []const u8,

/// Debug symbols bundle (or dSym).
d_sym: ?DebugSymbols = null,

/// A list of all input files.
/// Index of each input file also encodes the priority or precedence of one input file
/// over another.
files: std.MultiArrayList(File.Entry) = .{},
/// Long-lived list of all file descriptors.
/// We store them globally rather than per actual File so that we can re-use
/// one file handle per every object file within an archive.
file_handles: std.ArrayList(File.Handle) = .empty,
/// Index into `files` of the incremental Zig object, when the self-hosted backend is used.
zig_object: ?File.Index = null,
/// Index into `files` of the linker-internal object, created during `flush`.
internal_object: ?File.Index = null,
/// Indices into `files` of all input relocatable object files.
objects: std.ArrayList(File.Index) = .empty,
/// Indices into `files` of all input dylibs (including ones parsed from .tbd stubs).
dylibs: std.ArrayList(File.Index) = .empty,

/// Output segment load commands.
segments: std.ArrayList(macho.segment_command_64) = .empty,
/// Output section headers together with their atoms, thunks and relocations.
sections: std.MultiArrayList(Section) = .{},

/// Global symbol resolution state.
resolver: SymbolResolver = .{},
/// This table will be populated after `scanRelocs` has run.
/// Key is symbol index.
undefs: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, UndefRefs) = .empty,
/// Guards insertions into `undefs`, which may happen from multiple threads.
undefs_mutex: std.Thread.Mutex = .{},
/// Duplicate symbol definitions: symbol index -> list of files defining it.
dupes: std.AutoArrayHashMapUnmanaged(SymbolResolver.Index, std.ArrayList(File.Index)) = .empty,
/// Guards insertions into `dupes`, which may happen from multiple threads.
dupes_mutex: std.Thread.Mutex = .{},

// Load commands describing the __LINKEDIT metadata of the output file.
dyld_info_cmd: macho.dyld_info_command = .{},
symtab_cmd: macho.symtab_command = .{},
dysymtab_cmd: macho.dysymtab_command = .{},
function_starts_cmd: macho.linkedit_data_command = .{ .cmd = .FUNCTION_STARTS },
data_in_code_cmd: macho.linkedit_data_command = .{ .cmd = .DATA_IN_CODE },
uuid_cmd: macho.uuid_command = .{ .uuid = [_]u8{0} ** 16 },
codesig_cmd: macho.linkedit_data_command = .{ .cmd = .CODE_SIGNATURE },

// Indices of well-known segments/sections in `segments`/`sections`;
// null until the corresponding segment/section has been created.
pagezero_seg_index: ?u8 = null,
text_seg_index: ?u8 = null,
linkedit_seg_index: ?u8 = null,
text_sect_index: ?u8 = null,
data_sect_index: ?u8 = null,
got_sect_index: ?u8 = null,
stubs_sect_index: ?u8 = null,
stubs_helper_sect_index: ?u8 = null,
la_symbol_ptr_sect_index: ?u8 = null,
tlv_ptr_sect_index: ?u8 = null,
eh_frame_sect_index: ?u8 = null,
unwind_info_sect_index: ?u8 = null,
objc_stubs_sect_index: ?u8 = null,

/// Range-extension thunks for branches that cannot reach their target directly.
thunks: std.ArrayList(Thunk) = .empty,

/// Output synthetic sections
symtab: std.ArrayList(macho.nlist_64) = .empty,
strtab: std.ArrayList(u8) = .empty,
indsymtab: Indsymtab = .{},
got: GotSection = .{},
stubs: StubsSection = .{},
stubs_helper: StubsHelperSection = .{},
objc_stubs: ObjcStubsSection = .{},
la_symbol_ptr: LaSymbolPtrSection = .{},
tlv_ptr: TlvPtrSection = .{},
rebase_section: Rebase = .{},
bind_section: Bind = .{},
weak_bind_section: WeakBind = .{},
lazy_bind_section: LazyBind = .{},
export_trie: ExportTrie = .{},
unwind_info: UnwindInfo = .{},
data_in_code: DataInCode = .{},

/// Tracked loadable segments during incremental linking.
zig_text_seg_index: ?u8 = null,
zig_const_seg_index: ?u8 = null,
zig_data_seg_index: ?u8 = null,
zig_bss_seg_index: ?u8 = null,

/// Tracked section headers with incremental updates to Zig object.
zig_text_sect_index: ?u8 = null,
zig_const_sect_index: ?u8 = null,
zig_data_sect_index: ?u8 = null,
zig_bss_sect_index: ?u8 = null,

/// Tracked DWARF section headers that apply only when we emit relocatable.
/// For executable and loadable images, DWARF is tracked directly by dSYM bundle object.
debug_info_sect_index: ?u8 = null,
debug_abbrev_sect_index: ?u8 = null,
debug_str_sect_index: ?u8 = null,
debug_aranges_sect_index: ?u8 = null,
debug_line_sect_index: ?u8 = null,
debug_line_str_sect_index: ?u8 = null,
debug_loclists_sect_index: ?u8 = null,
debug_rnglists_sect_index: ?u8 = null,

// Flags aggregated over all inputs; atomic since they may be set concurrently
// (note the companion mutexes guarding `undefs`/`dupes` above).
has_tlv: AtomicBool = AtomicBool.init(false),
binds_to_weak: AtomicBool = AtomicBool.init(false),
weak_defines: AtomicBool = AtomicBool.init(false),

/// Options
/// SDK layout
sdk_layout: ?SdkLayout,
/// Size of the __PAGEZERO segment.
pagezero_size: ?u64,
/// Minimum space for future expansion of the load commands.
headerpad_size: ?u32,
/// Set enough space as if all paths were MAXPATHLEN.
headerpad_max_install_names: bool,
/// Remove dylibs that are unreachable by the entry point or exported symbols.
dead_strip_dylibs: bool,
/// Treatment of undefined symbols
undefined_treatment: UndefinedTreatment,
/// TODO: delete this, libraries need to be resolved by the frontend instead
lib_directories: []const Directory,
/// Resolved list of framework search directories
framework_dirs: []const []const u8,
/// List of input frameworks
frameworks: []const Framework,
/// Install name for the dylib.
/// TODO: unify with soname
install_name: ?[]const u8,
/// Path to entitlements file.
entitlements: ?[]const u8,
/// Dylib compatibility version (-compatibility_version).
compatibility_version: ?std.SemanticVersion,
/// Entry name
entry_name: ?[]const u8,
/// Target platform (OS tag plus version).
platform: Platform,
/// SDK version inferred from the SDK layout at `createEmpty` time, if available.
sdk_version: ?std.SemanticVersion,
/// When set to true, the linker will hoist all dylibs including system dependent dylibs.
no_implicit_dylibs: bool = false,
/// Whether the linker should parse and always force load objects containing ObjC in archives.
// TODO: in Zig we currently take -ObjC as always on
force_load_objc: bool = true,
/// Whether local symbols should be discarded from the symbol table.
discard_local_symbols: bool = false,

/// Hot-code swapping state.
hot_state: if (is_hot_update_compatible) HotUpdateState else struct {} = .{},
144
/// A framework requested on the command line (`-framework`/`-needed_framework`/
/// `-weak_framework`), resolved to a concrete file path.
/// When adding a new field, remember to update `hashAddFrameworks`.
pub const Framework = struct {
    /// Link even if no symbols from the framework are referenced.
    needed: bool = false,
    /// Bind the framework's symbols weakly (missing at runtime is allowed).
    weak: bool = false,
    /// Resolved path to the framework binary or stub.
    path: Path,
};
151
/// Folds every framework's link-time flags and file path into the cache manifest
/// hash. The order of additions is significant for hash stability: flags first,
/// then the file contents via `addFilePath`.
pub fn hashAddFrameworks(man: *Cache.Manifest, hm: []const Framework) !void {
    for (hm) |fw| {
        man.hash.add(fw.needed);
        man.hash.add(fw.weak);
        _ = try man.addFilePath(fw.path, null);
    }
}
159
/// Allocates and initializes a fresh MachO linker instance in `arena`, creates
/// the output file on disk (truncating any existing file), and — when a Zcu is
/// present and the self-hosted backend is used — seeds the incremental ZigObject
/// and its on-disk metadata.
/// The returned pointer is arena-owned; partially constructed state is torn down
/// by `self.base.destroy()` on error.
pub fn createEmpty(
    arena: Allocator,
    comp: *Compilation,
    emit: Path,
    options: link.File.OpenOptions,
) !*MachO {
    const target = &comp.root_mod.resolved_target.result;
    assert(target.ofmt == .macho);

    const gpa = comp.gpa;
    const use_llvm = comp.config.use_llvm;
    const opt_zcu = comp.zcu;
    const optimize_mode = comp.root_mod.optimize_mode;
    const output_mode = comp.config.output_mode;
    const link_mode = comp.config.link_mode;

    const allow_shlib_undefined = options.allow_shlib_undefined orelse false;

    const self = try arena.create(MachO);
    self.* = .{
        .base = .{
            .tag = .macho,
            .comp = comp,
            .emit = emit,
            // When the LLVM backend is used, the Zcu is emitted as a separate
            // object file that this linker later consumes as an input.
            .zcu_object_basename = if (use_llvm)
                try std.fmt.allocPrint(arena, "{s}_zcu.o", .{fs.path.stem(emit.sub_path)})
            else
                null,
            // Dead-stripping defaults to on outside of Debug builds.
            .gc_sections = options.gc_sections orelse (optimize_mode != .Debug),
            .print_gc_sections = options.print_gc_sections,
            .stack_size = options.stack_size orelse 16777216,
            .allow_shlib_undefined = allow_shlib_undefined,
            .file = null,
            .build_id = options.build_id,
        },
        .rpath_list = options.rpath_list,
        .pagezero_size = options.pagezero_size,
        .headerpad_size = options.headerpad_size,
        .headerpad_max_install_names = options.headerpad_max_install_names,
        .dead_strip_dylibs = options.dead_strip_dylibs,
        .sdk_layout = options.darwin_sdk_layout,
        .frameworks = options.frameworks,
        .install_name = options.install_name,
        .entitlements = options.entitlements,
        .compatibility_version = options.compatibility_version,
        // Only executables get a default entry symbol; libraries have none
        // unless the user names one explicitly.
        .entry_name = switch (options.entry) {
            .disabled => null,
            .default => if (output_mode != .Exe) null else default_entry_symbol_name,
            .enabled => default_entry_symbol_name,
            .named => |name| name,
        },
        .platform = Platform.fromTarget(target),
        .sdk_version = if (options.darwin_sdk_layout) |layout| inferSdkVersion(comp, layout) else null,
        .undefined_treatment = if (allow_shlib_undefined) .dynamic_lookup else .@"error",
        // TODO delete this, directories must instead be resolved by the frontend
        .lib_directories = options.lib_directories,
        .framework_dirs = options.framework_dirs,
        .force_load_objc = options.force_load_objc,
        .discard_local_symbols = options.discard_local_symbols,
    };
    errdefer self.base.destroy();

    self.base.file = try emit.root_dir.handle.createFile(emit.sub_path, .{
        .truncate = true,
        .read = true,
        .mode = link.File.determineMode(output_mode, link_mode),
    });

    // Append null file
    try self.files.append(gpa, .null);
    // Append empty string to string tables
    try self.strtab.append(gpa, 0);

    if (opt_zcu) |zcu| {
        if (!use_llvm) {
            // Self-hosted backend: create the incremental ZigObject that will
            // receive per-decl updates, and lay out its initial metadata.
            const index: File.Index = @intCast(try self.files.addOne(gpa));
            self.files.set(index, .{ .zig_object = .{
                .index = index,
                .basename = try std.fmt.allocPrint(arena, "{s}.o", .{
                    fs.path.stem(zcu.main_mod.root_src_path),
                }),
            } });
            self.zig_object = index;
            const zo = self.getZigObject().?;
            try zo.init(self);

            try self.initMetadata(.{
                .emit = emit,
                .zo = zo,
                .symbol_count_hint = options.symbol_count_hint,
                .program_code_size_hint = options.program_code_size_hint,
            });
        }
    }

    return self;
}
257
/// Entry point used when an output file may already exist on disk.
/// Currently identical to a cold start — any previous on-disk state is discarded.
pub fn open(
    arena: Allocator,
    comp: *Compilation,
    emit: Path,
    options: link.File.OpenOptions,
) !*MachO {
    // TODO: restore saved linker state, don't truncate the file, and
    // participate in incremental compilation.
    return createEmpty(arena, comp, emit, options);
}
268
/// Releases all heap state owned by the linker. Deallocation must not fail.
/// Note: synthetic sections that hold no allocations of their own (e.g.
/// `stubs_helper`, `la_symbol_ptr`, `indsymtab`) have no deinit call here.
pub fn deinit(self: *MachO) void {
    const gpa = self.base.comp.gpa;

    if (self.d_sym) |*d_sym| {
        d_sym.deinit();
    }

    // Close every shared file handle exactly once (handles are pooled here
    // rather than owned by individual File entries).
    for (self.file_handles.items) |handle| {
        handle.close();
    }
    self.file_handles.deinit(gpa);

    // Dispatch deinit on the active variant of each input file entry.
    for (self.files.items(.tags), self.files.items(.data)) |tag, *data| switch (tag) {
        .null => {},
        .zig_object => data.zig_object.deinit(gpa),
        .internal => data.internal.deinit(gpa),
        .object => data.object.deinit(gpa),
        .dylib => data.dylib.deinit(gpa),
    };
    self.files.deinit(gpa);
    self.objects.deinit(gpa);
    self.dylibs.deinit(gpa);

    self.segments.deinit(gpa);
    // `sections` is a MultiArrayList; free the per-section child containers
    // in parallel before freeing the list itself.
    for (
        self.sections.items(.atoms),
        self.sections.items(.out),
        self.sections.items(.thunks),
        self.sections.items(.relocs),
    ) |*atoms, *out, *thnks, *relocs| {
        atoms.deinit(gpa);
        out.deinit(gpa);
        thnks.deinit(gpa);
        relocs.deinit(gpa);
    }
    self.sections.deinit(gpa);

    self.resolver.deinit(gpa);

    for (self.undefs.values()) |*val| {
        val.deinit(gpa);
    }
    self.undefs.deinit(gpa);
    for (self.dupes.values()) |*val| {
        val.deinit(gpa);
    }
    self.dupes.deinit(gpa);

    self.symtab.deinit(gpa);
    self.strtab.deinit(gpa);
    self.got.deinit(gpa);
    self.stubs.deinit(gpa);
    self.objc_stubs.deinit(gpa);
    self.tlv_ptr.deinit(gpa);
    self.rebase_section.deinit(gpa);
    self.bind_section.deinit(gpa);
    self.weak_bind_section.deinit(gpa);
    self.lazy_bind_section.deinit(gpa);
    self.export_trie.deinit(gpa);
    self.unwind_info.deinit(gpa);
    self.data_in_code.deinit(gpa);

    for (self.thunks.items) |*thunk| thunk.deinit(gpa);
    self.thunks.deinit(gpa);
}
334
/// Performs the full link producing the final Mach-O image.
/// Static libraries and relocatable objects are delegated to `relocatable.zig`.
/// Otherwise the phases are: collect and classify inputs, resolve symbols,
/// lay out sections and segments, write contents and __LINKEDIT metadata, then
/// finish with the UUID and (when required) the code signature, which must
/// always come last since it hashes everything before it.
pub fn flush(
    self: *MachO,
    arena: Allocator,
    tid: Zcu.PerThread.Id,
    prog_node: std.Progress.Node,
) link.File.FlushError!void {
    const tracy = trace(@src());
    defer tracy.end();

    const comp = self.base.comp;
    const gpa = comp.gpa;
    const diags = &self.base.comp.link_diags;

    const sub_prog_node = prog_node.start("MachO Flush", 0);
    defer sub_prog_node.end();

    const zcu_obj_path: ?Path = if (self.base.zcu_object_basename) |raw| p: {
        break :p try comp.resolveEmitPathFlush(arena, .temp, raw);
    } else null;

    // --verbose-link
    if (comp.verbose_link) try self.dumpArgv(comp);

    if (self.getZigObject()) |zo| try zo.flush(self, tid);
    if (self.base.isStaticLib()) return relocatable.flushStaticLib(self, comp, zcu_obj_path);
    if (self.base.isObject()) return relocatable.flushObject(self, comp, zcu_obj_path);

    // Positionals: objects and archives, in command-line order (order encodes
    // precedence — see the `files` field).
    var positionals = std.array_list.Managed(link.Input).init(gpa);
    defer positionals.deinit();

    try positionals.ensureUnusedCapacity(comp.link_inputs.len);

    for (comp.link_inputs) |link_input| switch (link_input) {
        .dso => continue, // handled below
        .object, .archive => positionals.appendAssumeCapacity(link_input),
        .dso_exact => @panic("TODO"),
        .res => unreachable,
    };

    // This is a set of object files emitted by clang in a single `build-exe` invocation.
    // For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
    // in this set.
    try positionals.ensureUnusedCapacity(comp.c_object_table.keys().len);
    for (comp.c_object_table.keys()) |key| {
        positionals.appendAssumeCapacity(try link.openObjectInput(diags, key.status.success.object_path));
    }

    if (zcu_obj_path) |path| try positionals.append(try link.openObjectInput(diags, path));

    if (comp.config.any_sanitize_thread) {
        try positionals.append(try link.openObjectInput(diags, comp.tsan_lib.?.full_object_path));
    }

    if (comp.config.any_fuzz) {
        try positionals.append(try link.openArchiveInput(diags, comp.fuzzer_lib.?.full_object_path, false, false));
    }

    if (comp.ubsan_rt_lib) |crt_file| {
        const path = crt_file.full_object_path;
        self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
            diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
    } else if (comp.ubsan_rt_obj) |crt_file| {
        const path = crt_file.full_object_path;
        self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
            diags.addParseError(path, "failed to parse object: {s}", .{@errorName(err)});
    }

    for (positionals.items) |link_input| {
        self.classifyInputFile(link_input) catch |err|
            diags.addParseError(link_input.path().?, "failed to read input file: {s}", .{@errorName(err)});
    }

    var system_libs = std.array_list.Managed(SystemLib).init(gpa);
    defer system_libs.deinit();

    // frameworks
    try system_libs.ensureUnusedCapacity(self.frameworks.len);
    for (self.frameworks) |info| {
        system_libs.appendAssumeCapacity(.{
            .needed = info.needed,
            .weak = info.weak,
            .path = info.path,
        });
    }

    // libc++ dep
    if (comp.config.link_libcpp) {
        try system_libs.ensureUnusedCapacity(2);
        system_libs.appendAssumeCapacity(.{ .path = comp.libcxxabi_static_lib.?.full_object_path });
        system_libs.appendAssumeCapacity(.{ .path = comp.libcxx_static_lib.?.full_object_path });
    }

    const is_exe_or_dyn_lib = comp.config.output_mode == .Exe or
        (comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);

    if (comp.config.link_libc and is_exe_or_dyn_lib) {
        if (comp.zigc_static_lib) |zigc| {
            const path = zigc.full_object_path;
            self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
                diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
        }
    }

    // libc/libSystem dep
    self.resolveLibSystem(arena, comp, &system_libs) catch |err| switch (err) {
        error.MissingLibSystem => {}, // already reported
        else => |e| return diags.fail("failed to resolve libSystem: {s}", .{@errorName(e)}),
    };

    for (comp.link_inputs) |link_input| switch (link_input) {
        .object, .archive, .dso_exact => continue,
        .res => unreachable,
        .dso => {
            self.classifyInputFile(link_input) catch |err|
                diags.addParseError(link_input.path().?, "failed to parse input file: {s}", .{@errorName(err)});
        },
    };

    for (system_libs.items) |lib| {
        switch (Compilation.classifyFileExt(lib.path.sub_path)) {
            .shared_library => {
                const dso_input = try link.openDsoInput(diags, lib.path, lib.needed, lib.weak, lib.reexport);
                self.classifyInputFile(dso_input) catch |err|
                    diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
            },
            .static_library => {
                const archive_input = try link.openArchiveInput(diags, lib.path, lib.must_link, lib.hidden);
                self.classifyInputFile(archive_input) catch |err|
                    diags.addParseError(lib.path, "failed to parse input file: {s}", .{@errorName(err)});
            },
            else => unreachable,
        }
    }

    // Finally, link against compiler_rt.
    if (comp.compiler_rt_lib) |crt_file| {
        const path = crt_file.full_object_path;
        self.classifyInputFile(try link.openArchiveInput(diags, path, false, false)) catch |err|
            diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(err)});
    } else if (comp.compiler_rt_obj) |crt_file| {
        const path = crt_file.full_object_path;
        self.classifyInputFile(try link.openObjectInput(diags, path)) catch |err|
            diags.addParseError(path, "failed to parse object: {s}", .{@errorName(err)});
    }

    try self.parseInputFiles();
    self.parseDependentDylibs() catch |err| {
        switch (err) {
            error.MissingLibraryDependencies => {},
            else => |e| return diags.fail("failed to parse dependent libraries: {s}", .{@errorName(e)}),
        }
    };

    if (diags.hasErrors()) return error.LinkFailure;

    // Create the internal object that owns linker-synthesized symbols.
    {
        const index = @as(File.Index, @intCast(try self.files.addOne(gpa)));
        self.files.set(index, .{ .internal = .{ .index = index } });
        self.internal_object = index;
        const object = self.getInternalObject().?;
        try object.init(gpa);
        try object.initSymbols(self);
    }

    // Symbol resolution and post-resolution cleanups.
    try self.resolveSymbols();
    try self.convertTentativeDefsAndResolveSpecialSymbols();
    self.dedupLiterals() catch |err| switch (err) {
        error.LinkFailure => return error.LinkFailure,
        else => |e| return diags.fail("failed to deduplicate literals: {s}", .{@errorName(e)}),
    };

    if (self.base.gc_sections) {
        try dead_strip.gcAtoms(self);
    }

    self.checkDuplicates() catch |err| switch (err) {
        error.HasDuplicates => return error.LinkFailure,
        else => |e| return diags.fail("failed to check for duplicate symbol definitions: {s}", .{@errorName(e)}),
    };

    self.markImportsAndExports();
    self.deadStripDylibs();

    // Dylib ordinals are 1-based, matching their final load-command order.
    for (self.dylibs.items, 1..) |index, ord| {
        const dylib = self.getFile(index).?.dylib;
        dylib.ordinal = @intCast(ord);
    }

    self.claimUnresolved();

    self.scanRelocs() catch |err| switch (err) {
        error.HasUndefinedSymbols => return error.LinkFailure,
        else => |e| return diags.fail("failed to scan relocations: {s}", .{@errorName(e)}),
    };

    // Layout: sections first, then unwind info (which needs section sizes),
    // then segments and address/file-offset allocation.
    try self.initOutputSections();
    try self.initSyntheticSections();
    try self.sortSections();
    try self.addAtomsToSections();
    try self.calcSectionSizes();

    try self.generateUnwindInfo();

    try self.initSegments();
    self.allocateSections() catch |err| switch (err) {
        error.LinkFailure => return error.LinkFailure,
        else => |e| return diags.fail("failed to allocate sections: {s}", .{@errorName(e)}),
    };
    self.allocateSegments();
    self.allocateSyntheticSymbols();

    if (build_options.enable_logging) {
        state_log.debug("{f}", .{self.dumpState()});
    }

    // Beyond this point, everything has been allocated a virtual address and we can resolve
    // the relocations, and commit objects to file.
    try self.resizeSections();

    if (self.getZigObject()) |zo| {
        zo.resolveRelocs(self) catch |err| switch (err) {
            error.ResolveFailed => return error.LinkFailure,
            else => |e| return e,
        };
    }
    try self.writeSectionsAndUpdateLinkeditSizes();

    try self.writeSectionsToFile();
    try self.allocateLinkeditSegment();
    self.writeLinkeditSectionsToFile() catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.LinkFailure => return error.LinkFailure,
        else => |e| return diags.fail("failed to write linkedit sections to file: {s}", .{@errorName(e)}),
    };

    var codesig: ?CodeSignature = if (self.requiresCodeSig()) blk: {
        // Preallocate space for the code signature.
        // We need to do this at this stage so that we have the load commands with proper values
        // written out to the file.
        // The most important here is to have the correct vm and filesize of the __LINKEDIT segment
        // where the code signature goes into.
        var codesig = CodeSignature.init(self.getPageSize());
        codesig.code_directory.ident = fs.path.basename(self.base.emit.sub_path);
        if (self.entitlements) |path| codesig.addEntitlements(gpa, path) catch |err|
            return diags.fail("failed to add entitlements from {s}: {s}", .{ path, @errorName(err) });
        try self.writeCodeSignaturePadding(&codesig);
        break :blk codesig;
    } else null;
    defer if (codesig) |*csig| csig.deinit(gpa);

    self.getLinkeditSegment().vmsize = mem.alignForward(
        u64,
        self.getLinkeditSegment().filesize,
        self.getPageSize(),
    );

    const ncmds, const sizeofcmds, const uuid_cmd_offset = self.writeLoadCommands() catch |err| switch (err) {
        error.WriteFailed => unreachable,
        error.OutOfMemory => return error.OutOfMemory,
        error.LinkFailure => return error.LinkFailure,
    };
    try self.writeHeader(ncmds, sizeofcmds);
    self.writeUuid(uuid_cmd_offset, self.requiresCodeSig()) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        error.LinkFailure => return error.LinkFailure,
        else => |e| return diags.fail("failed to calculate and write uuid: {s}", .{@errorName(e)}),
    };
    if (self.getDebugSymbols()) |dsym| dsym.flush(self) catch |err| switch (err) {
        error.OutOfMemory => return error.OutOfMemory,
        else => |e| return diags.fail("failed to flush debug symbols: {s}", .{@errorName(e)}),
    };

    // Code signing always comes last.
    if (codesig) |*csig| {
        self.writeCodeSignature(csig) catch |err| switch (err) {
            error.OutOfMemory => return error.OutOfMemory,
            error.LinkFailure => return error.LinkFailure,
            else => |e| return diags.fail("failed to write code signature: {s}", .{@errorName(e)}),
        };
        const emit = self.base.emit;
        invalidateKernelCache(emit.root_dir.handle, emit.sub_path) catch |err| switch (err) {
            else => |e| return diags.fail("failed to invalidate kernel cache: {s}", .{@errorName(e)}),
        };
    }
}
620
/// --verbose-link output: reconstructs and prints an `ld`/`ar`-style command
/// line equivalent to what this link is doing. Purely informational; the
/// ordering of appended arguments mirrors the actual linking logic in `flush`.
fn dumpArgv(self: *MachO, comp: *Compilation) !void {
    const gpa = self.base.comp.gpa;
    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

    const directory = self.base.emit.root_dir;
    const full_out_path = try directory.join(arena, &[_][]const u8{self.base.emit.sub_path});
    const zcu_obj_path: ?[]const u8 = if (self.base.zcu_object_basename) |raw| p: {
        const p = try comp.resolveEmitPathFlush(arena, .temp, raw);
        break :p try p.toString(arena);
    } else null;

    var argv = std.array_list.Managed([]const u8).init(arena);

    try argv.append("zig");

    if (self.base.isStaticLib()) {
        try argv.append("ar");
    } else {
        try argv.append("ld");
    }

    if (self.base.isObject()) {
        try argv.append("-r");
    }

    if (self.base.isRelocatable()) {
        // Relocatable outputs: just the inputs, no dynamic-link flags.
        for (comp.link_inputs) |link_input| switch (link_input) {
            .object, .archive => |obj| try argv.append(try obj.path.toString(arena)),
            .res => |res| try argv.append(try res.path.toString(arena)),
            .dso => |dso| try argv.append(try dso.path.toString(arena)),
            .dso_exact => |dso_exact| try argv.appendSlice(&.{ "-l", dso_exact.name }),
        };

        for (comp.c_object_table.keys()) |key| {
            try argv.append(try key.status.success.object_path.toString(arena));
        }

        if (zcu_obj_path) |p| {
            try argv.append(p);
        }
    } else {
        if (!self.base.isStatic()) {
            try argv.append("-dynamic");
        }

        if (self.base.isDynLib()) {
            try argv.append("-dylib");

            if (self.install_name) |install_name| {
                try argv.append("-install_name");
                try argv.append(install_name);
            }
        }

        try argv.append("-platform_version");
        try argv.append(@tagName(self.platform.os_tag));
        try argv.append(try std.fmt.allocPrint(arena, "{f}", .{self.platform.version}));

        // Third -platform_version operand: the SDK version, falling back to the
        // platform version when no SDK version was inferred.
        if (self.sdk_version) |ver| {
            try argv.append(try std.fmt.allocPrint(arena, "{d}.{d}", .{ ver.major, ver.minor }));
        } else {
            try argv.append(try std.fmt.allocPrint(arena, "{f}", .{self.platform.version}));
        }

        if (comp.sysroot) |syslibroot| {
            try argv.append("-syslibroot");
            try argv.append(syslibroot);
        }

        for (self.rpath_list) |rpath| {
            try argv.appendSlice(&.{ "-rpath", rpath });
        }

        if (self.pagezero_size) |size| {
            try argv.append("-pagezero_size");
            try argv.append(try std.fmt.allocPrint(arena, "0x{x}", .{size}));
        }

        if (self.headerpad_size) |size| {
            try argv.append("-headerpad_size");
            try argv.append(try std.fmt.allocPrint(arena, "0x{x}", .{size}));
        }

        if (self.headerpad_max_install_names) {
            try argv.append("-headerpad_max_install_names");
        }

        if (self.base.gc_sections) {
            try argv.append("-dead_strip");
        }

        if (self.dead_strip_dylibs) {
            try argv.append("-dead_strip_dylibs");
        }

        if (self.force_load_objc) {
            try argv.append("-ObjC");
        }

        if (self.discard_local_symbols) {
            try argv.append("-x");
        }

        if (self.entry_name) |entry_name| {
            try argv.appendSlice(&.{ "-e", entry_name });
        }

        try argv.append("-o");
        try argv.append(full_out_path);

        if (self.base.isDynLib() and self.base.allow_shlib_undefined) {
            try argv.append("-undefined");
            try argv.append("dynamic_lookup");
        }

        for (comp.link_inputs) |link_input| switch (link_input) {
            .dso => continue, // handled below
            .res => unreachable, // windows only
            .object, .archive => |obj| {
                if (obj.must_link) try argv.append("-force_load"); // TODO: verify this
                try argv.append(try obj.path.toString(arena));
            },
            .dso_exact => |dso_exact| try argv.appendSlice(&.{ "-l", dso_exact.name }),
        };

        for (comp.c_object_table.keys()) |key| {
            try argv.append(try key.status.success.object_path.toString(arena));
        }

        if (zcu_obj_path) |p| {
            try argv.append(p);
        }

        if (comp.config.any_sanitize_thread) {
            const path = try comp.tsan_lib.?.full_object_path.toString(arena);
            try argv.appendSlice(&.{ path, "-rpath", std.fs.path.dirname(path) orelse "." });
        }

        if (comp.config.any_fuzz) {
            try argv.append(try comp.fuzzer_lib.?.full_object_path.toString(arena));
        }

        for (self.lib_directories) |lib_directory| {
            // TODO delete this, directories must instead be resolved by the frontend
            const arg = try std.fmt.allocPrint(arena, "-L{s}", .{lib_directory.path orelse "."});
            try argv.append(arg);
        }

        for (comp.link_inputs) |link_input| switch (link_input) {
            .object, .archive, .dso_exact => continue, // handled above
            .res => unreachable, // windows only
            .dso => |dso| {
                if (dso.needed) {
                    try argv.appendSlice(&.{ "-needed-l", try dso.path.toString(arena) });
                } else if (dso.weak) {
                    try argv.appendSlice(&.{ "-weak-l", try dso.path.toString(arena) });
                } else {
                    try argv.appendSlice(&.{ "-l", try dso.path.toString(arena) });
                }
            },
        };

        for (self.framework_dirs) |f_dir| {
            try argv.append("-F");
            try argv.append(f_dir);
        }

        for (self.frameworks) |framework| {
            const name = framework.path.stem();
            const arg = if (framework.needed)
                try std.fmt.allocPrint(arena, "-needed_framework {s}", .{name})
            else if (framework.weak)
                try std.fmt.allocPrint(arena, "-weak_framework {s}", .{name})
            else
                try std.fmt.allocPrint(arena, "-framework {s}", .{name});
            try argv.append(arg);
        }

        if (comp.config.link_libcpp) {
            try argv.appendSlice(&.{
                try comp.libcxxabi_static_lib.?.full_object_path.toString(arena),
                try comp.libcxx_static_lib.?.full_object_path.toString(arena),
            });
        }

        try argv.append("-lSystem");

        if (comp.zigc_static_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
        if (comp.compiler_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
        if (comp.compiler_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
        if (comp.ubsan_rt_lib) |lib| try argv.append(try lib.full_object_path.toString(arena));
        if (comp.ubsan_rt_obj) |obj| try argv.append(try obj.full_object_path.toString(arena));
    }

    Compilation.dump_argv(argv.items);
}
820
/// Locates libSystem and appends it to `out_libs` as a needed library.
/// Search order: the configured SDK layout (real SDK sysroot, or the vendored
/// libc stubs shipped with Zig), then every explicit library directory.
/// Reports a missing-library diagnostic listing all attempted paths on failure.
/// TODO delete this, libsystem must be resolved when setting up the compilation pipeline
pub fn resolveLibSystem(
    self: *MachO,
    arena: Allocator,
    comp: *Compilation,
    out_libs: anytype,
) !void {
    const diags = &self.base.comp.link_diags;

    var candidate = std.array_list.Managed(u8).init(arena);
    var attempted = std.array_list.Managed([]const u8).init(arena);

    found: {
        if (self.sdk_layout) |layout| switch (layout) {
            .sdk => {
                const dir = try fs.path.join(arena, &.{ comp.sysroot.?, "usr", "lib" });
                if (try accessLibPath(arena, &candidate, &attempted, dir, "System")) break :found;
            },
            .vendored => {
                const dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "darwin" });
                if (try accessLibPath(arena, &candidate, &attempted, dir, "System")) break :found;
            },
        };

        for (self.lib_directories) |directory| {
            if (try accessLibPath(arena, &candidate, &attempted, directory.path orelse ".", "System")) break :found;
        }

        diags.addMissingLibraryError(attempted.items, "unable to find libSystem system library", .{});
        return error.MissingLibSystem;
    }

    try out_libs.append(.{
        .needed = true,
        .path = Path.initCwd(try arena.dupe(u8, candidate.items)),
    });
}
859
/// Sniffs what kind of input `input` actually is — a slice of a universal (fat)
/// file, a 64-bit Mach-O object or dylib, a static archive, or (as the final
/// fallback) a text-based .tbd stub — and dispatches to the matching parser.
/// The checks are ordered; each labeled block falls through to the next probe.
pub fn classifyInputFile(self: *MachO, input: link.Input) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const path, const file = input.pathAndFile().?;
    // TODO don't classify now, it's too late. The input file has already been classified
    log.debug("classifying input file {f}", .{path});

    const fh = try self.addFileHandle(file);
    var buffer: [Archive.SARMAG]u8 = undefined;

    // For fat files, all subsequent reads start at the matching slice's offset.
    const fat_arch: ?fat.Arch = try self.parseFatFile(file, path);
    const offset = if (fat_arch) |fa| fa.offset else 0;

    if (readMachHeader(file, offset) catch null) |h| blk: {
        // Only 64-bit Mach-O is supported; anything else tries the next probe.
        if (h.magic != macho.MH_MAGIC_64) break :blk;
        switch (h.filetype) {
            macho.MH_OBJECT => try self.addObject(path, fh, offset),
            macho.MH_DYLIB => _ = try self.addDylib(.fromLinkInput(input), true, fh, offset),
            else => return error.UnknownFileType,
        }
        return;
    }
    if (readArMagic(file, offset, &buffer) catch null) |ar_magic| blk: {
        if (!mem.eql(u8, ar_magic, Archive.ARMAG)) break :blk;
        try self.addArchive(input.archive, fh, fat_arch);
        return;
    }
    // Neither Mach-O nor archive: assume a text-based dylib stub (.tbd).
    _ = try self.addTbd(.fromLinkInput(input), true, fh);
}
890
/// Checks whether `file` is a universal ("fat") binary and, if so, returns the
/// arch slice matching the link target's CPU architecture.
/// Returns null when the file is not fat at all; emits a parse error when the
/// file is fat but contains no slice for the target architecture.
fn parseFatFile(self: *MachO, file: std.fs.File, path: Path) !?fat.Arch {
    const diags = &self.base.comp.link_diags;
    const header = fat.readFatHeader(file) catch return null;
    switch (header.magic) {
        macho.FAT_MAGIC, macho.FAT_MAGIC_64 => {},
        else => return null,
    }
    const want = self.getTarget().cpu.arch;
    var archs_buffer: [2]fat.Arch = undefined;
    for (try fat.parseArchs(file, header, &archs_buffer)) |slice| {
        if (slice.tag == want) return slice;
    }
    return diags.failParse(path, "missing arch in universal file: expected {s}", .{@tagName(want)});
}
903
/// Reads a 64-bit Mach-O header from `file` starting at byte `offset`.
/// Returns error.InputOutput if the file is too short to contain one.
pub fn readMachHeader(file: std.fs.File, offset: usize) !macho.mach_header_64 {
    var hdr: macho.mach_header_64 = undefined;
    const raw = mem.asBytes(&hdr);
    const amt = try file.preadAll(raw, offset);
    if (amt != raw.len) return error.InputOutput;
    return hdr;
}
911
/// Reads `Archive.SARMAG` bytes from `file` at `offset` into `buffer` and
/// returns them as a slice. Returns error.InputOutput on a short read.
pub fn readArMagic(file: std.fs.File, offset: usize, buffer: *[Archive.SARMAG]u8) ![]const u8 {
    const amt = try file.preadAll(buffer, offset);
    if (amt != buffer.len) return error.InputOutput;
    return buffer;
}
917
/// Registers a relocatable object file with the linker.
/// `offset` is the byte offset at which the Mach-O data starts (non-zero for
/// slices of universal files). The path is resolved to an absolute POSIX path
/// relative to the compilation's cwd before being stored.
fn addObject(self: *MachO, path: Path, handle_index: File.HandleIndex, offset: u64) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const comp = self.base.comp;
    const gpa = comp.gpa;

    const abs_path = try std.fs.path.resolvePosix(gpa, &.{
        comp.dirs.cwd,
        path.root_dir.path orelse ".",
        path.sub_path,
    });
    // Freed only on the error path; on success ownership transfers to the
    // file entry created below.
    errdefer gpa.free(abs_path);

    const file = self.getFileHandle(handle_index);
    const stat = try file.stat();
    // NOTE(review): mtime is stored on the entry — presumably for staleness
    // diagnostics; confirm against Object's users.
    const mtime = stat.mtime.toSeconds();
    const index: File.Index = @intCast(try self.files.addOne(gpa));
    self.files.set(index, .{ .object = .{
        .offset = offset,
        .path = abs_path,
        .file_handle = handle_index,
        .mtime = @intCast(mtime),
        .index = index,
    } });
    try self.objects.append(gpa, index);
}
945
/// Parses every registered object file and dylib. Individual parse failures
/// are recorded as link diagnostics by the worker; if any were reported this
/// returns error.LinkFailure.
pub fn parseInputFiles(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    for (self.objects.items) |index| parseInputFileWorker(self, self.getFile(index).?);
    for (self.dylibs.items) |index| parseInputFileWorker(self, self.getFile(index).?);

    if (diags.hasErrors()) return error.LinkFailure;
}
963
/// Parses a single input file, converting unexpected failures into reported
/// diagnostics. Errors that the parser already reported are swallowed here.
fn parseInputFileWorker(self: *MachO, file: File) void {
    file.parse(self) catch |err| switch (err) {
        // Already reported as diagnostics during parsing.
        error.MalformedObject,
        error.MalformedDylib,
        error.MalformedTbd,
        error.InvalidMachineType,
        error.InvalidTarget,
        => {},

        else => |e| self.reportParseError2(file.getIndex(), "unexpected error: parsing input file failed with error {s}", .{@errorName(e)}) catch {},
    };
}
978
/// Unpacks a static archive and registers each member as a separate object
/// file. All members reuse the archive's single file handle (see the
/// `file_handles` field docs).
fn addArchive(self: *MachO, lib: link.Input.Object, handle: File.HandleIndex, fat_arch: ?fat.Arch) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;

    var archive: Archive = .{};
    defer archive.deinit(gpa);
    try archive.unpack(self, lib.path, handle, fat_arch);

    for (archive.objects.items) |unpacked| {
        const index: File.Index = @intCast(try self.files.addOne(gpa));
        self.files.set(index, .{ .object = unpacked });
        // Patch fields that are only known once the member has a slot in
        // `files`; take a pointer because entries are stored by value.
        const object = &self.files.items(.data)[index].object;
        object.index = index;
        // Archive members start dead unless must-link semantics apply.
        object.alive = lib.must_link; // TODO: or self.options.all_load;
        object.hidden = lib.hidden;
        try self.objects.append(gpa, index);
    }
}
999
/// Registers a Mach-O dylib with the linker and returns its file index.
/// `explicit` is true for dylibs named directly as link inputs, false for
/// ones discovered as dependencies of other dylibs. A new dylib starts as its
/// own umbrella (`umbrella = index`); `parseDependentDylibs` may re-parent
/// non-hoisted dependents later.
fn addDylib(self: *MachO, lib: SystemLib, explicit: bool, handle: File.HandleIndex, offset: u64) !File.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;

    const index: File.Index = @intCast(try self.files.addOne(gpa));
    self.files.set(index, .{ .dylib = .{
        .offset = offset,
        .file_handle = handle,
        .tag = .dylib,
        .path = .{
            .root_dir = lib.path.root_dir,
            // Duplicated so the entry owns its sub path.
            .sub_path = try gpa.dupe(u8, lib.path.sub_path),
        },
        .index = index,
        .needed = lib.needed,
        .weak = lib.weak,
        .reexport = lib.reexport,
        .explicit = explicit,
        .umbrella = index,
    } });
    try self.dylibs.append(gpa, index);

    return index;
}
1026
/// Registers a text-based dylib stub (.tbd) and returns its file index.
/// Mirrors `addDylib` except the entry is tagged `.tbd` and no file offset
/// applies (stubs are never fat slices).
fn addTbd(self: *MachO, lib: SystemLib, explicit: bool, handle: File.HandleIndex) !File.Index {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;
    const index: File.Index = @intCast(try self.files.addOne(gpa));
    // Duplicate the sub path so the entry owns its memory.
    const sub_path = try gpa.dupe(u8, lib.path.sub_path);
    self.files.set(index, .{ .dylib = .{
        .offset = 0,
        .file_handle = handle,
        .tag = .tbd,
        .path = .{ .root_dir = lib.path.root_dir, .sub_path = sub_path },
        .index = index,
        .needed = lib.needed,
        .weak = lib.weak,
        .reexport = lib.reexport,
        .explicit = explicit,
        // Like any new dylib, a stub starts as its own umbrella.
        .umbrella = index,
    } });
    try self.dylibs.append(gpa, index);
    return index;
}
1052
/// According to ld64's manual, public (i.e., system) dylibs/frameworks are hoisted into the final
/// image unless overridden by -no_implicit_dylibs.
/// A name is treated as public when it lives under /usr/lib, or when it is the
/// main binary of a /System/Library/Frameworks bundle.
fn isHoisted(self: *MachO, install_name: []const u8) bool {
    // NOTE(review): returning `true` when -no_implicit_dylibs is set reads as
    // inverted relative to the doc comment above — confirm the intended
    // meaning of `hoisted` (see its use in `parseDependentDylibs`) before
    // changing anything here.
    if (self.no_implicit_dylibs) return true;
    if (fs.path.dirname(install_name)) |dirname| {
        if (mem.startsWith(u8, dirname, "/usr/lib")) return true;
        if (eatPrefix(dirname, "/System/Library/Frameworks/")) |path| {
            // Framework layout: .../Frameworks/Name.framework/Name — match the
            // basename against the bundle name before the first '.'.
            const basename = fs.path.basename(install_name);
            if (mem.indexOfScalar(u8, path, '.')) |index| {
                if (mem.eql(u8, basename, path[0..index])) return true;
            }
        }
    }
    return false;
}
1068
/// Probes `search_dir` for `lib<name>{.tbd,.dylib,}` in that order, recording
/// every candidate in `checked_paths` (for diagnostics). On success the
/// winning candidate is left in `test_path` and true is returned.
/// TODO delete this, libraries must be instead resolved when instantiating the compilation pipeline
fn accessLibPath(
    arena: Allocator,
    test_path: *std.array_list.Managed(u8),
    checked_paths: *std.array_list.Managed([]const u8),
    search_dir: []const u8,
    name: []const u8,
) !bool {
    const sep = fs.path.sep_str;
    const extensions = [_][]const u8{ ".tbd", ".dylib", "" };

    for (&extensions) |ext| {
        test_path.clearRetainingCapacity();
        try test_path.print("{s}" ++ sep ++ "lib{s}{s}", .{ search_dir, name, ext });
        try checked_paths.append(try arena.dupe(u8, test_path.items));
        if (fs.cwd().access(test_path.items, .{})) |_| {
            return true;
        } else |err| switch (err) {
            error.FileNotFound => continue,
            else => |e| return e,
        }
    }

    return false;
}
1092
/// Probes `search_dir` for `<name>.framework/<name>{.tbd,.dylib,}` in that
/// order, recording every candidate in `checked_paths` (for diagnostics). On
/// success the winning candidate is left in `test_path` and true is returned.
fn accessFrameworkPath(
    arena: Allocator,
    test_path: *std.array_list.Managed(u8),
    checked_paths: *std.array_list.Managed([]const u8),
    search_dir: []const u8,
    name: []const u8,
) !bool {
    const sep = fs.path.sep_str;
    const extensions = [_][]const u8{ ".tbd", ".dylib", "" };

    for (&extensions) |ext| {
        test_path.clearRetainingCapacity();
        try test_path.print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
            search_dir,
            name,
            name,
            ext,
        });
        try checked_paths.append(try arena.dupe(u8, test_path.items));
        if (fs.cwd().access(test_path.items, .{})) |_| {
            return true;
        } else |err| switch (err) {
            error.FileNotFound => continue,
            else => |e| return e,
        }
    }

    return false;
}
1120
/// Resolves, opens, and parses the transitive dylib dependencies
/// (LC_LOAD_DYLIB entries) of every dylib registered so far. Newly discovered
/// dependents are appended to `self.dylibs`, so the outer loop keeps running
/// until the list stops growing. Returns error.MissingLibraryDependencies if
/// any dependency failed to resolve.
///
/// Bug fixes relative to the previous version:
/// - `has_errors` was set unconditionally after the resolve-check in the
///   pairing loop, making every link involving dylib dependents fail; it is
///   now set only on the failure branch.
/// - an unresolved dependency `continue`d without recording an entry in
///   `dependents`, so the parallel `for` over `dylib.dependents`/`dependents`
///   below would trip the equal-lengths safety check; a null index (0) is now
///   recorded instead.
fn parseDependentDylibs(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    if (self.dylibs.items.len == 0) return;

    const gpa = self.base.comp.gpa;
    const framework_dirs = self.framework_dirs;

    // TODO delete this, directories must instead be resolved by the frontend
    const lib_directories = self.lib_directories;

    // Scratch allocations (candidate paths, checked-path lists) live only for
    // the duration of this function.
    var arena_alloc = std.heap.ArenaAllocator.init(gpa);
    defer arena_alloc.deinit();
    const arena = arena_alloc.allocator();

    // TODO handle duplicate dylibs - it is not uncommon to have the same dylib loaded multiple times
    // in which case we should track that and return File.Index immediately instead re-parsing paths.

    var has_errors = false;
    var index: usize = 0;
    while (index < self.dylibs.items.len) : (index += 1) {
        const dylib_index = self.dylibs.items[index];

        // Resolved file index for every dependent of this dylib, kept in
        // lockstep with `dylib.dependents` (0 acts as a null index).
        var dependents = std.array_list.Managed(File.Index).init(gpa);
        defer dependents.deinit();
        try dependents.ensureTotalCapacityPrecise(self.getFile(dylib_index).?.dylib.dependents.items.len);

        const is_weak = self.getFile(dylib_index).?.dylib.weak;
        for (self.getFile(dylib_index).?.dylib.dependents.items) |id| {
            // We will search for the dependent dylibs in the following order:
            // 1. Basename is in search lib directories or framework directories
            // 2. If name is an absolute path, search as-is optionally prepending a syslibroot
            //    if specified.
            // 3. If name is a relative path, substitute @rpath, @loader_path, @executable_path with
            //    dependees list of rpaths, and search there.
            // 4. Finally, just search the provided relative path directly in CWD.
            var test_path = std.array_list.Managed(u8).init(arena);
            var checked_paths = std.array_list.Managed([]const u8).init(arena);

            const full_path = full_path: {
                {
                    const stem = fs.path.stem(id.name);

                    // Framework
                    for (framework_dirs) |dir| {
                        test_path.clearRetainingCapacity();
                        if (try accessFrameworkPath(arena, &test_path, &checked_paths, dir, stem)) break :full_path test_path.items;
                    }

                    // Library
                    const lib_name = eatPrefix(stem, "lib") orelse stem;
                    for (lib_directories) |lib_directory| {
                        test_path.clearRetainingCapacity();
                        if (try accessLibPath(arena, &test_path, &checked_paths, lib_directory.path orelse ".", lib_name)) break :full_path test_path.items;
                    }
                }

                if (fs.path.isAbsolute(id.name)) {
                    const existing_ext = fs.path.extension(id.name);
                    const path = if (existing_ext.len > 0) id.name[0 .. id.name.len - existing_ext.len] else id.name;
                    for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
                        test_path.clearRetainingCapacity();
                        if (self.base.comp.sysroot) |root| {
                            try test_path.print("{s}" ++ fs.path.sep_str ++ "{s}{s}", .{ root, path, ext });
                        } else {
                            try test_path.print("{s}{s}", .{ path, ext });
                        }
                        try checked_paths.append(try arena.dupe(u8, test_path.items));
                        fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
                            error.FileNotFound => continue,
                            else => |e| return e,
                        };
                        break :full_path test_path.items;
                    }
                }

                if (eatPrefix(id.name, "@rpath/")) |path| {
                    const dylib = self.getFile(dylib_index).?.dylib;
                    for (self.getFile(dylib.umbrella).?.dylib.rpaths.keys()) |rpath| {
                        const prefix = eatPrefix(rpath, "@loader_path/") orelse rpath;
                        const rel_path = try fs.path.join(arena, &.{ prefix, path });
                        try checked_paths.append(rel_path);
                        var buffer: [fs.max_path_bytes]u8 = undefined;
                        const full_path = fs.realpath(rel_path, &buffer) catch continue;
                        break :full_path try arena.dupe(u8, full_path);
                    }
                } else if (eatPrefix(id.name, "@loader_path/")) |_| {
                    try self.reportParseError2(dylib_index, "TODO handle install_name '{s}'", .{id.name});
                    return error.Unhandled;
                } else if (eatPrefix(id.name, "@executable_path/")) |_| {
                    try self.reportParseError2(dylib_index, "TODO handle install_name '{s}'", .{id.name});
                    return error.Unhandled;
                }

                try checked_paths.append(try arena.dupe(u8, id.name));
                var buffer: [fs.max_path_bytes]u8 = undefined;
                if (fs.realpath(id.name, &buffer)) |full_path| {
                    break :full_path try arena.dupe(u8, full_path);
                } else |_| {
                    try self.reportMissingDependencyError(
                        self.getFile(dylib_index).?.dylib.getUmbrella(self).index,
                        id.name,
                        checked_paths.items,
                        "unable to resolve dependency",
                        .{},
                    );
                    has_errors = true;
                    // Record a null index so `dependents` keeps the same
                    // length as `dylib.dependents` for the pairing loop below.
                    dependents.appendAssumeCapacity(0);
                    continue;
                }
            };
            const lib: SystemLib = .{
                .path = Path.initCwd(full_path),
                .weak = is_weak,
            };
            const file = try lib.path.root_dir.handle.openFile(lib.path.sub_path, .{});
            const fh = try self.addFileHandle(file);
            const fat_arch = try self.parseFatFile(file, lib.path);
            const offset = if (fat_arch) |fa| fa.offset else 0;
            const file_index = file_index: {
                if (readMachHeader(file, offset) catch null) |h| blk: {
                    if (h.magic != macho.MH_MAGIC_64) break :blk;
                    switch (h.filetype) {
                        macho.MH_DYLIB => break :file_index try self.addDylib(lib, false, fh, offset),
                        // Not a dylib: record the null index; the pairing loop
                        // below reports it as an unresolved dependency.
                        else => break :file_index @as(File.Index, 0),
                    }
                }
                break :file_index try self.addTbd(lib, false, fh);
            };
            dependents.appendAssumeCapacity(file_index);
        }

        const dylib = self.getFile(dylib_index).?.dylib;
        for (dylib.dependents.items, dependents.items) |id, file_index| {
            if (self.getFile(file_index)) |file| {
                const dep_dylib = file.dylib;
                try dep_dylib.parse(self); // TODO in parallel
                dep_dylib.hoisted = self.isHoisted(id.name);
                dep_dylib.umbrella = dylib.umbrella;
                if (!dep_dylib.hoisted) {
                    // Non-hoisted dependents are folded into their umbrella:
                    // their exports and rpaths become visible through it.
                    const umbrella = dep_dylib.getUmbrella(self);
                    for (dep_dylib.exports.items(.name), dep_dylib.exports.items(.flags)) |off, flags| {
                        // TODO rethink this entire algorithm
                        try umbrella.addExport(gpa, dep_dylib.getString(off), flags);
                    }
                    try umbrella.rpaths.ensureUnusedCapacity(gpa, dep_dylib.rpaths.keys().len);
                    for (dep_dylib.rpaths.keys()) |rpath| {
                        umbrella.rpaths.putAssumeCapacity(try gpa.dupe(u8, rpath), {});
                    }
                }
            } else {
                try self.reportDependencyError(
                    dylib.getUmbrella(self).index,
                    id.name,
                    "unable to resolve dependency",
                    .{},
                );
                // Only flag failure when this dependent actually failed to
                // resolve (previously set unconditionally — see fn doc).
                has_errors = true;
            }
        }
    }

    if (has_errors) return error.MissingLibraryDependencies;
}
1283
/// When resolving symbols, we approach the problem similarly to `mold`.
/// 1. Resolve symbols across all objects (including those preemptively extracted from archives).
/// 2. Resolve symbols across all shared objects.
/// 3. Mark live objects (see `MachO.markLive`)
/// 4. Reset state of all resolved globals since we will redo this bit on the pruned set.
/// 5. Remove references to dead objects/shared objects
/// 6. Re-run symbol resolution on pruned objects and shared objects sets.
pub fn resolveSymbols(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    // Resolve symbols in the ZigObject. For now, we assume that it's always live.
    if (self.getZigObject()) |zo| try zo.asFile().resolveSymbols(self);
    // Resolve symbols on the set of all objects and shared objects (even if some are unneeded).
    for (self.objects.items) |index| try self.getFile(index).?.resolveSymbols(self);
    for (self.dylibs.items) |index| try self.getFile(index).?.resolveSymbols(self);
    if (self.getInternalObject()) |obj| try obj.resolveSymbols(self);

    // Mark live objects.
    self.markLive();

    // Reset state of all globals after marking live objects.
    self.resolver.reset();

    // Prune dead objects: drop them from `objects` (order-preserving) and
    // release their slots in `files`. Only advance `i` when keeping, since
    // orderedRemove shifts the remaining items down.
    var i: usize = 0;
    while (i < self.objects.items.len) {
        const index = self.objects.items[i];
        if (!self.getFile(index).?.object.alive) {
            _ = self.objects.orderedRemove(i);
            self.files.items(.data)[index].object.deinit(self.base.comp.gpa);
            self.files.set(index, .null);
        } else i += 1;
    }

    // Re-resolve the symbols on the pruned sets.
    if (self.getZigObject()) |zo| try zo.resolveSymbols(self);
    for (self.objects.items) |index| try self.getFile(index).?.resolveSymbols(self);
    for (self.dylibs.items) |index| try self.getFile(index).?.resolveSymbols(self);
    if (self.getInternalObject()) |obj| try obj.resolveSymbols(self);

    // Merge symbol visibility
    if (self.getZigObject()) |zo| zo.mergeSymbolVisibility(self);
    for (self.objects.items) |index| self.getFile(index).?.object.mergeSymbolVisibility(self);
}
1329
/// Propagates liveness from already-alive roots (the Zig object, must-link
/// objects, the internal object) to everything they reference. Objects left
/// dead afterwards are pruned by `resolveSymbols`.
fn markLive(self: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    if (self.getZigObject()) |zo| zo.markLive(self);
    for (self.objects.items) |index| {
        const file = self.getFile(index).?;
        if (file.object.alive) file.object.markLive(self);
    }
    if (self.getInternalObject()) |obj| obj.markLive(self);
}
1341
/// Converts tentative definitions in every object into defined symbols and
/// resolves the internal object's special symbols, failing the link if any
/// worker reported a diagnostic.
fn convertTentativeDefsAndResolveSpecialSymbols(self: *MachO) !void {
    const diags = &self.base.comp.link_diags;

    for (self.objects.items) |index| {
        convertTentativeDefinitionsWorker(self, self.getFile(index).?.object);
    }
    if (self.getInternalObject()) |obj| {
        resolveSpecialSymbolsWorker(self, obj);
    }

    if (diags.hasErrors()) return error.LinkFailure;
}
1354
/// Converts one object's tentative definitions, turning any failure into a
/// reported diagnostic instead of propagating it.
fn convertTentativeDefinitionsWorker(self: *MachO, object: *Object) void {
    const tracy = trace(@src());
    defer tracy.end();
    object.convertTentativeDefinitions(self) catch |err| self.reportParseError2(
        object.index,
        "unexpected error occurred while converting tentative symbols into defined symbols: {s}",
        .{@errorName(err)},
    ) catch {};
}
1366
/// Resolves the internal object's boundary symbols (segment$start$* and
/// friends — see `initSyntheticSections`) and ObjC msgsend stub symbols.
/// Each failure is converted into a diagnostic and aborts this worker early.
fn resolveSpecialSymbolsWorker(self: *MachO, obj: *InternalObject) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    obj.resolveBoundarySymbols(self) catch |err|
        return diags.addError("failed to resolve boundary symbols: {s}", .{@errorName(err)});
    obj.resolveObjcMsgSendSymbols(self) catch |err|
        return diags.addError("failed to resolve ObjC msgsend stubs: {s}", .{@errorName(err)});
}
1378
/// Deduplicates literals across all input files using a shared LiteralPool:
/// first every file registers its literals into the pool, then every file
/// deduplicates its references against the pooled entries.
pub fn dedupLiterals(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;
    var lp: LiteralPool = .{};
    defer lp.deinit(gpa);

    // Pass 1: collect literals.
    if (self.getZigObject()) |zo| try zo.resolveLiterals(&lp, self);
    for (self.objects.items) |index| try self.getFile(index).?.object.resolveLiterals(&lp, self);
    if (self.getInternalObject()) |object| try object.resolveLiterals(&lp, self);

    // Pass 2: dedup against the completed pool.
    if (self.getZigObject()) |zo| File.dedupLiterals(zo.asFile(), lp, self);
    for (self.objects.items) |index| File.dedupLiterals(self.getFile(index).?, lp, self);
    if (self.getInternalObject()) |object| File.dedupLiterals(object.asFile(), lp, self);
}
1409
/// Delegates unresolved-symbol claiming to the Zig object and every input
/// object file.
fn claimUnresolved(self: *MachO) void {
    if (self.getZigObject()) |zo| zo.asFile().claimUnresolved(self);
    for (self.objects.items) |index| self.getFile(index).?.claimUnresolved(self);
}
1418
/// Runs duplicate-definition checking across every input file, fails the link
/// if the checks themselves errored, then reports any duplicates found.
fn checkDuplicates(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    if (self.getZigObject()) |zo| checkDuplicatesWorker(self, zo.asFile());
    for (self.objects.items) |index| checkDuplicatesWorker(self, self.getFile(index).?);
    if (self.getInternalObject()) |obj| checkDuplicatesWorker(self, obj.asFile());

    if (diags.hasErrors()) return error.LinkFailure;

    try self.reportDuplicates();
}
1441
/// Checks one file for duplicate definitions, converting any failure of the
/// check itself into a reported diagnostic.
fn checkDuplicatesWorker(self: *MachO, file: File) void {
    const tracy = trace(@src());
    defer tracy.end();
    file.checkDuplicates(self) catch |err| self.reportParseError2(
        file.getIndex(),
        "failed to check for duplicate definitions: {s}",
        .{@errorName(err)},
    ) catch {};
}
1451
/// Delegates import/export marking to the Zig object, every input object,
/// and the internal object.
fn markImportsAndExports(self: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    if (self.getZigObject()) |zo| zo.asFile().markImportsExports(self);
    for (self.objects.items) |index| self.getFile(index).?.markImportsExports(self);
    if (self.getInternalObject()) |obj| obj.asFile().markImportsExports(self);
}
1466
/// Removes dylibs that ended up unreferenced after symbol resolution: each
/// dylib first marks itself referenced based on actual symbol use, then dead
/// entries are dropped from `dylibs` (order-preserving) and their `files`
/// slots reset to `.null`.
fn deadStripDylibs(self: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    for (self.dylibs.items) |index| {
        self.getFile(index).?.dylib.markReferenced(self);
    }

    // Delete-while-iterating: only advance `i` when the current entry is
    // kept, since orderedRemove shifts subsequent items down.
    var i: usize = 0;
    while (i < self.dylibs.items.len) {
        const index = self.dylibs.items[i];
        if (!self.getFile(index).?.dylib.isAlive(self)) {
            _ = self.dylibs.orderedRemove(i);
            self.files.items(.data)[index].dylib.deinit(self.base.comp.gpa);
            self.files.set(index, .null);
        } else i += 1;
    }
}
1485
/// Scans relocations in all input files, then reports undefined symbols (see
/// the `undefs` field, populated by this scan), and finally creates symbol
/// indirection entries for every file. The undef check runs before
/// indirection so the link fails early on unresolved symbols.
fn scanRelocs(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    {
        if (self.getZigObject()) |zo| {
            scanRelocsWorker(self, zo.asFile());
        }
        for (self.objects.items) |index| {
            scanRelocsWorker(self, self.getFile(index).?);
        }
        if (self.getInternalObject()) |obj| {
            scanRelocsWorker(self, obj.asFile());
        }
    }

    if (diags.hasErrors()) return error.LinkFailure;

    if (self.getInternalObject()) |obj| {
        try obj.checkUndefs(self);
    }
    try self.reportUndefs();

    if (self.getZigObject()) |zo| {
        try zo.asFile().createSymbolIndirection(self);
    }
    for (self.objects.items) |index| {
        try self.getFile(index).?.createSymbolIndirection(self);
    }
    for (self.dylibs.items) |index| {
        try self.getFile(index).?.createSymbolIndirection(self);
    }
    if (self.getInternalObject()) |obj| {
        try obj.asFile().createSymbolIndirection(self);
    }
}
1524
/// Scans one file's relocations, converting any failure into a reported
/// diagnostic instead of propagating it.
fn scanRelocsWorker(self: *MachO, file: File) void {
    file.scanRelocs(self) catch |err| self.reportParseError2(
        file.getIndex(),
        "failed to scan relocations: {s}",
        .{@errorName(err)},
    ) catch {};
}
1532
/// Sorts resolver symbol indices lexicographically by symbol name so that
/// diagnostics output is deterministic.
fn sortGlobalSymbolsByName(self: *MachO, symbols: []SymbolResolver.Index) void {
    const Sort = struct {
        fn byName(ctx: *MachO, lhs: SymbolResolver.Index, rhs: SymbolResolver.Index) bool {
            // Resolver indices are offset by one into `resolver.keys`.
            const lhs_name = ctx.resolver.keys.items[lhs - 1].getName(ctx);
            const rhs_name = ctx.resolver.keys.items[rhs - 1].getName(ctx);
            return mem.order(u8, lhs_name, rhs_name) == .lt;
        }
    };
    mem.sort(SymbolResolver.Index, symbols, self, Sort.byName);
}
1543
/// Emits one diagnostic per undefined symbol collected in `self.undefs` and
/// returns error.HasUndefinedSymbols if any exist. Skipped entirely when
/// undefined symbols are suppressed or deferred to dynamic lookup. Symbols
/// are sorted by name and references by file for deterministic output.
fn reportUndefs(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    if (self.undefined_treatment == .suppress or
        self.undefined_treatment == .dynamic_lookup) return;
    if (self.undefs.keys().len == 0) return; // Nothing to do

    const gpa = self.base.comp.gpa;
    const diags = &self.base.comp.link_diags;
    // At most this many "referenced by" notes per symbol; a final note
    // summarizes any remainder.
    const max_notes = 4;

    // We will sort by name, and then by file to ensure deterministic output.
    var keys = try std.array_list.Managed(SymbolResolver.Index).initCapacity(gpa, self.undefs.keys().len);
    defer keys.deinit();
    keys.appendSliceAssumeCapacity(self.undefs.keys());
    self.sortGlobalSymbolsByName(keys.items);

    const refLessThan = struct {
        fn lessThan(ctx: void, lhs: Ref, rhs: Ref) bool {
            _ = ctx;
            return lhs.lessThan(rhs);
        }
    }.lessThan;

    for (self.undefs.values()) |*undefs| switch (undefs.*) {
        .refs => |refs| mem.sort(Ref, refs.items, {}, refLessThan),
        else => {},
    };

    for (keys.items) |key| {
        // Resolver indices are offset by one into `resolver.keys`.
        const undef_sym = self.resolver.keys.items[key - 1];
        const notes = self.undefs.get(key).?;
        const nnotes = nnotes: {
            const nnotes = switch (notes) {
                .refs => |refs| refs.items.len,
                else => 1,
            };
            // Cap at max_notes, plus one extra slot for the "N more times"
            // summary note when the reference list was truncated.
            break :nnotes @min(nnotes, max_notes) + @intFromBool(nnotes > max_notes);
        };

        var err = try diags.addErrorWithNotes(nnotes);
        try err.addMsg("undefined symbol: {s}", .{undef_sym.getName(self)});

        switch (notes) {
            .force_undefined => err.addNote("referenced with linker flag -u", .{}),
            .entry => err.addNote("referenced with linker flag -e", .{}),
            .dyld_stub_binder, .objc_msgsend => err.addNote("referenced implicitly", .{}),
            .refs => |refs| {
                var inote: usize = 0;
                while (inote < @min(refs.items.len, max_notes)) : (inote += 1) {
                    const ref = refs.items[inote];
                    const file = self.getFile(ref.file).?;
                    const atom = ref.getAtom(self).?;
                    err.addNote("referenced by {f}:{s}", .{ file.fmtPath(), atom.getName(self) });
                }

                if (refs.items.len > max_notes) {
                    const remaining = refs.items.len - max_notes;
                    err.addNote("referenced {d} more times", .{remaining});
                }
            },
        }
    }

    return error.HasUndefinedSymbols;
}
1611
/// Maps input sections from every file to output sections, then ensures the
/// __TEXT,__text and __DATA,__data output sections exist even when no input
/// contributed to them.
fn initOutputSections(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    for (self.objects.items) |index| {
        try self.getFile(index).?.initOutputSections(self);
    }
    if (self.getInternalObject()) |obj| {
        try obj.asFile().initOutputSections(self);
    }
    self.text_sect_index = self.getSectionByName("__TEXT", "__text") orelse
        try self.addSection("__TEXT", "__text", .{
            // Mach-O section alignment is log2: 1-byte for x86_64, 4-byte for
            // aarch64 (fixed-width 4-byte instructions).
            .alignment = switch (self.getTarget().cpu.arch) {
                .x86_64 => 0,
                .aarch64 => 2,
                else => unreachable,
            },
            .flags = macho.S_REGULAR |
                macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        });
    self.data_sect_index = self.getSectionByName("__DATA", "__data") orelse
        try self.addSection("__DATA", "__data", .{});
}
1635
/// Creates linker-synthesized sections on demand: GOT, stubs (plus helper and
/// lazy-pointer sections), ObjC stubs, TLV pointers, unwind info / eh_frame,
/// and any segments/sections forced into existence by boundary symbols.
fn initSyntheticSections(self: *MachO) !void {
    const cpu_arch = self.getTarget().cpu.arch;

    if (self.got.symbols.items.len > 0) {
        self.got_sect_index = try self.addSection("__DATA_CONST", "__got", .{
            .flags = macho.S_NON_LAZY_SYMBOL_POINTERS,
            // reserved1 = this section's starting index in the indirect symbol
            // table; the reserved1 values here imply the layout is stubs
            // entries first, then GOT, then lazy pointers.
            .reserved1 = @intCast(self.stubs.symbols.items.len),
        });
    }

    if (self.stubs.symbols.items.len > 0) {
        self.stubs_sect_index = try self.addSection("__TEXT", "__stubs", .{
            .flags = macho.S_SYMBOL_STUBS |
                macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
            .reserved1 = 0,
            // For S_SYMBOL_STUBS sections, reserved2 is the size in bytes of
            // one stub entry.
            .reserved2 = switch (cpu_arch) {
                .x86_64 => 6,
                .aarch64 => 3 * @sizeOf(u32),
                else => 0,
            },
        });
        self.stubs_helper_sect_index = try self.addSection("__TEXT", "__stub_helper", .{
            .flags = macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        });
        self.la_symbol_ptr_sect_index = try self.addSection("__DATA", "__la_symbol_ptr", .{
            .flags = macho.S_LAZY_SYMBOL_POINTERS,
            .reserved1 = @intCast(self.stubs.symbols.items.len + self.got.symbols.items.len),
        });
    }

    if (self.objc_stubs.symbols.items.len > 0) {
        self.objc_stubs_sect_index = try self.addSection("__TEXT", "__objc_stubs", .{
            .flags = macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        });
    }

    if (self.tlv_ptr.symbols.items.len > 0) {
        self.tlv_ptr_sect_index = try self.addSection("__DATA", "__thread_ptrs", .{
            .flags = macho.S_THREAD_LOCAL_VARIABLE_POINTERS,
        });
    }

    const needs_unwind_info = for (self.objects.items) |index| {
        if (self.getFile(index).?.object.hasUnwindRecords()) break true;
    } else false;
    if (needs_unwind_info) {
        self.unwind_info_sect_index = try self.addSection("__TEXT", "__unwind_info", .{});
    }

    const needs_eh_frame = for (self.objects.items) |index| {
        if (self.getFile(index).?.object.hasEhFrameRecords()) break true;
    } else false;
    if (needs_eh_frame) {
        // eh_frame records are only expected alongside unwind records.
        assert(needs_unwind_info);
        self.eh_frame_sect_index = try self.addSection("__TEXT", "__eh_frame", .{});
    }

    // Boundary symbols (segment$start$SEG, section$start$SEG$SECT, and their
    // $end$ counterparts) force the named segment/section into existence even
    // if nothing else references it.
    if (self.getInternalObject()) |obj| {
        const gpa = self.base.comp.gpa;

        for (obj.boundary_symbols.items) |sym_index| {
            const ref = obj.getSymbolRef(sym_index, self);
            const sym = ref.getSymbol(self).?;
            const name = sym.getName(self);

            if (eatPrefix(name, "segment$start$")) |segname| {
                if (self.getSegmentByName(segname) == null) { // TODO check segname is valid
                    const prot = getSegmentProt(segname);
                    _ = try self.segments.append(gpa, .{
                        .cmdsize = @sizeOf(macho.segment_command_64),
                        .segname = makeStaticString(segname),
                        .initprot = prot,
                        .maxprot = prot,
                    });
                }
            } else if (eatPrefix(name, "segment$end$")) |segname| {
                if (self.getSegmentByName(segname) == null) { // TODO check segname is valid
                    const prot = getSegmentProt(segname);
                    _ = try self.segments.append(gpa, .{
                        .cmdsize = @sizeOf(macho.segment_command_64),
                        .segname = makeStaticString(segname),
                        .initprot = prot,
                        .maxprot = prot,
                    });
                }
            } else if (eatPrefix(name, "section$start$")) |actual_name| {
                const sep = mem.indexOfScalar(u8, actual_name, '$').?; // TODO error rather than a panic
                const segname = actual_name[0..sep]; // TODO check segname is valid
                const sectname = actual_name[sep + 1 ..]; // TODO check sectname is valid
                if (self.getSectionByName(segname, sectname) == null) {
                    _ = try self.addSection(segname, sectname, .{});
                }
            } else if (eatPrefix(name, "section$end$")) |actual_name| {
                const sep = mem.indexOfScalar(u8, actual_name, '$').?; // TODO error rather than a panic
                const segname = actual_name[0..sep]; // TODO check segname is valid
                const sectname = actual_name[sep + 1 ..]; // TODO check sectname is valid
                if (self.getSectionByName(segname, sectname) == null) {
                    _ = try self.addSection(segname, sectname, .{});
                }
            } else unreachable;
        }
    }
}
1739
/// Maps a segment name to its VM protection: __PAGEZERO gets no access,
/// __TEXT is read+execute, __LINKEDIT is read-only, and everything else
/// (e.g. __DATA) is read+write.
fn getSegmentProt(segname: []const u8) macho.vm_prot_t {
    const Entry = struct { name: []const u8, prot: macho.vm_prot_t };
    const special = [_]Entry{
        .{ .name = "__PAGEZERO", .prot = macho.PROT.NONE },
        .{ .name = "__TEXT", .prot = macho.PROT.READ | macho.PROT.EXEC },
        .{ .name = "__LINKEDIT", .prot = macho.PROT.READ },
    };
    for (&special) |entry| {
        if (mem.eql(u8, segname, entry.name)) return entry.prot;
    }
    return macho.PROT.READ | macho.PROT.WRITE;
}
1746
/// Assigns an ordering rank to a segment name (lower sorts first):
/// __PAGEZERO, __TEXT, __DATA_CONST, __DATA, unknown segments, Zig segments,
/// and __LINKEDIT last.
fn getSegmentRank(segname: []const u8) u8 {
    if (mem.eql(u8, segname, "__PAGEZERO")) return 0x0;
    if (mem.eql(u8, segname, "__LINKEDIT")) return 0xf;
    // Checked before the prefix tests so e.g. a "__TEXT_ZIG" segment ranks as
    // a Zig segment rather than a __TEXT one.
    if (mem.indexOf(u8, segname, "ZIG") != null) return 0xe;
    if (mem.startsWith(u8, segname, "__TEXT")) return 0x1;
    if (mem.startsWith(u8, segname, "__DATA_CONST")) return 0x2;
    if (mem.startsWith(u8, segname, "__DATA")) return 0x3;
    return 0x4;
}
1756
/// Orders two segment names by rank (see `getSegmentRank`), breaking ties
/// lexicographically.
fn segmentLessThan(ctx: void, lhs: []const u8, rhs: []const u8) bool {
    _ = ctx;
    const lhs_rank = getSegmentRank(lhs);
    const rhs_rank = getSegmentRank(rhs);
    if (lhs_rank != rhs_rank) return lhs_rank < rhs_rank;
    return mem.order(u8, lhs, rhs) == .lt;
}
1766
/// Assigns an ordering rank (lower sorts first) to a section within its
/// segment; ties are broken by name in `sectionLessThan`.
fn getSectionRank(section: macho.section_64) u8 {
    if (section.isCode()) {
        // __text leads the code sections, followed by symbol stubs.
        if (mem.eql(u8, "__text", section.sectName())) return 0x0;
        if (section.type() == macho.S_SYMBOL_STUBS) return 0x1;
        return 0x2;
    }
    switch (section.type()) {
        macho.S_NON_LAZY_SYMBOL_POINTERS,
        macho.S_LAZY_SYMBOL_POINTERS,
        => return 0x0,

        macho.S_MOD_INIT_FUNC_POINTERS => return 0x1,
        macho.S_MOD_TERM_FUNC_POINTERS => return 0x2,
        // Zerofill-style sections rank last — presumably to keep file-backed
        // contents contiguous; confirm against the layout code.
        macho.S_ZEROFILL => return 0xf,
        macho.S_THREAD_LOCAL_REGULAR => return 0xd,
        macho.S_THREAD_LOCAL_ZEROFILL => return 0xe,

        else => {
            // Unwind metadata sorts to the end of the segment.
            if (mem.eql(u8, "__unwind_info", section.sectName())) return 0xe;
            if (mem.eql(u8, "__compact_unwind", section.sectName())) return 0xe;
            if (mem.eql(u8, "__eh_frame", section.sectName())) return 0xf;
            return 0x3;
        },
    }
}
1792
/// Orders two sections: first by segment (see `segmentLessThan`), then within
/// the same segment by section rank, then by section name.
fn sectionLessThan(ctx: void, lhs: macho.section_64, rhs: macho.section_64) bool {
    if (!mem.eql(u8, lhs.segName(), rhs.segName())) {
        return segmentLessThan(ctx, lhs.segName(), rhs.segName());
    }
    const lhs_rank = getSectionRank(lhs);
    const rhs_rank = getSectionRank(rhs);
    if (lhs_rank != rhs_rank) return lhs_rank < rhs_rank;
    return mem.order(u8, lhs.sectName(), rhs.sectName()) == .lt;
}
1804
/// Sorts the output sections into their final order (segment rank, then
/// section rank, then name) and rewrites every stored section index — the
/// cached `*_sect_index` fields and each live atom's `out_n_sect` — through a
/// backlink table mapping old indices to new ones.
pub fn sortSections(self: *MachO) !void {
    const Entry = struct {
        index: u8,

        pub fn lessThan(macho_file: *MachO, lhs: @This(), rhs: @This()) bool {
            return sectionLessThan(
                {},
                macho_file.sections.items(.header)[lhs.index],
                macho_file.sections.items(.header)[rhs.index],
            );
        }
    };

    const gpa = self.base.comp.gpa;

    var entries = try std.array_list.Managed(Entry).initCapacity(gpa, self.sections.slice().len);
    defer entries.deinit();
    for (0..self.sections.slice().len) |index| {
        entries.appendAssumeCapacity(.{ .index = @intCast(index) });
    }

    mem.sort(Entry, entries.items, self, Entry.lessThan);

    // backlinks[old_index] == new_index after the sort.
    const backlinks = try gpa.alloc(u8, entries.items.len);
    defer gpa.free(backlinks);
    for (entries.items, 0..) |entry, i| {
        backlinks[entry.index] = @intCast(i);
    }

    // Rebuild the MultiArrayList in sorted order from the old (owned) slice.
    var slice = self.sections.toOwnedSlice();
    defer slice.deinit(gpa);

    try self.sections.ensureTotalCapacity(gpa, slice.len);
    for (entries.items) |sorted| {
        self.sections.appendAssumeCapacity(slice.get(sorted.index));
    }

    // Remap every cached special-section index to its post-sort position.
    // NOTE(review): text_sect_index is not in this list — verify it is
    // (re)derived after sorting, or it would go stale here.
    for (&[_]*?u8{
        &self.data_sect_index,
        &self.got_sect_index,
        &self.zig_text_sect_index,
        &self.zig_const_sect_index,
        &self.zig_data_sect_index,
        &self.zig_bss_sect_index,
        &self.stubs_sect_index,
        &self.stubs_helper_sect_index,
        &self.la_symbol_ptr_sect_index,
        &self.tlv_ptr_sect_index,
        &self.eh_frame_sect_index,
        &self.unwind_info_sect_index,
        &self.objc_stubs_sect_index,
        &self.debug_str_sect_index,
        &self.debug_info_sect_index,
        &self.debug_abbrev_sect_index,
        &self.debug_aranges_sect_index,
        &self.debug_line_sect_index,
        &self.debug_line_str_sect_index,
        &self.debug_loclists_sect_index,
        &self.debug_rnglists_sect_index,
    }) |maybe_index| {
        if (maybe_index.*) |*index| {
            index.* = backlinks[index.*];
        }
    }

    // Remap the output section index stored on every live atom.
    if (self.getZigObject()) |zo| {
        for (zo.getAtoms()) |atom_index| {
            const atom = zo.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            atom.out_n_sect = backlinks[atom.out_n_sect];
        }
        // Debug info caches section metadata; refresh it after the shuffle.
        if (zo.dwarf) |*dwarf| dwarf.reloadSectionMetadata();
    }

    for (self.objects.items) |index| {
        const file = self.getFile(index).?;
        for (file.getAtoms()) |atom_index| {
            const atom = file.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            atom.out_n_sect = backlinks[atom.out_n_sect];
        }
    }

    if (self.getInternalObject()) |object| {
        for (object.getAtoms()) |atom_index| {
            const atom = object.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            atom.out_n_sect = backlinks[atom.out_n_sect];
        }
    }
}
1896
/// Registers every live atom from every input file with its output section's
/// atom list, so later passes can lay out and write sections atom by atom.
pub fn addAtomsToSections(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;

    if (self.getZigObject()) |zo| {
        for (zo.getAtoms()) |atom_index| {
            const atom = zo.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            // Atoms placed in dedicated Zig sections are managed incrementally
            // and are skipped here.
            if (self.isZigSection(atom.out_n_sect)) continue;
            try self.sections.items(.atoms)[atom.out_n_sect].append(gpa, .{
                .index = atom_index,
                .file = zo.index,
            });
        }
    }
    for (self.objects.items) |file_index| {
        const object = self.getFile(file_index).?;
        for (object.getAtoms()) |atom_index| {
            const atom = object.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            try self.sections.items(.atoms)[atom.out_n_sect].append(gpa, .{
                .index = atom_index,
                .file = file_index,
            });
        }
    }
    if (self.getInternalObject()) |internal| {
        for (internal.getAtoms()) |atom_index| {
            const atom = internal.getAtom(atom_index) orelse continue;
            if (!atom.isAlive()) continue;
            try self.sections.items(.atoms)[atom.out_n_sect].append(gpa, .{
                .index = atom_index,
                .file = internal.index,
            });
        }
    }
}
1930
/// Computes the final size and alignment of every output section: atom-backed
/// sections (with thunk creation for architectures that need range extension),
/// per-file symtab contributions, and all synthetic sections (GOT, stubs,
/// lazy/TLV pointers, Objective-C stubs).
fn calcSectionSizes(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;
    const cpu_arch = self.getTarget().cpu.arch;

    if (self.data_sect_index) |idx| {
        // Reserve one 8-byte word in __data, aligned to 8.
        // NOTE(review): purpose of the reserved word is not visible here — confirm.
        const header = &self.sections.items(.header)[idx];
        header.size += @sizeOf(u64);
        header.@"align" = 3;
    }

    {
        const slice = self.sections.slice();
        // Size all atom-backed sections, except code sections on targets that
        // need thunks — those are handled by createThunksWorker below.
        for (slice.items(.header), slice.items(.atoms), 0..) |header, atoms, i| {
            if (atoms.items.len == 0) continue;
            if (self.requiresThunks() and header.isCode()) continue;
            calcSectionSizeWorker(self, @as(u8, @intCast(i)));
        }

        if (self.requiresThunks()) {
            for (slice.items(.header), slice.items(.atoms), 0..) |header, atoms, i| {
                if (!header.isCode()) continue;
                if (atoms.items.len == 0) continue;
                createThunksWorker(self, @as(u8, @intCast(i)));
            }
        }

        // At this point, we can also calculate most of the symtab and data-in-code linkedit section sizes
        if (self.getZigObject()) |zo| {
            File.calcSymtabSize(zo.asFile(), self);
        }
        for (self.objects.items) |index| {
            File.calcSymtabSize(self.getFile(index).?, self);
        }
        for (self.dylibs.items) |index| {
            File.calcSymtabSize(self.getFile(index).?, self);
        }
        if (self.getInternalObject()) |obj| {
            File.calcSymtabSize(obj.asFile(), self);
        }
    }

    // Workers report through diags rather than returning errors; bail if any failed.
    if (diags.hasErrors()) return error.LinkFailure;

    try self.calcSymtabSize();

    // Synthetic section sizes are derived from their accumulated entries.
    if (self.got_sect_index) |idx| {
        const header = &self.sections.items(.header)[idx];
        header.size = self.got.size();
        header.@"align" = 3;
    }

    if (self.stubs_sect_index) |idx| {
        const header = &self.sections.items(.header)[idx];
        header.size = self.stubs.size(self);
        header.@"align" = switch (cpu_arch) {
            .x86_64 => 1,
            .aarch64 => 2,
            else => 0,
        };
    }

    if (self.stubs_helper_sect_index) |idx| {
        const header = &self.sections.items(.header)[idx];
        header.size = self.stubs_helper.size(self);
        header.@"align" = 2;
    }

    if (self.la_symbol_ptr_sect_index) |idx| {
        const header = &self.sections.items(.header)[idx];
        header.size = self.la_symbol_ptr.size(self);
        header.@"align" = 3;
    }

    if (self.tlv_ptr_sect_index) |idx| {
        const header = &self.sections.items(.header)[idx];
        header.size = self.tlv_ptr.size();
        header.@"align" = 3;
    }

    if (self.objc_stubs_sect_index) |idx| {
        const header = &self.sections.items(.header)[idx];
        header.size = self.objc_stubs.size(self);
        header.@"align" = switch (cpu_arch) {
            .x86_64 => 0,
            .aarch64 => 2,
            else => 0,
        };
    }
}
2023
/// Lays out the atoms of section `sect_id`: assigns each live atom its
/// section-relative offset (`atom.value`) and accumulates the section's total
/// size and maximum alignment. Errors are reported through the diagnostics
/// sink — this function returns `void`, matching the other `*Worker` helpers.
fn calcSectionSizeWorker(self: *MachO, sect_id: u8) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    const doWork = struct {
        fn doWork(macho_file: *MachO, header: *macho.section_64, atoms: []const Ref) !void {
            for (atoms) |ref| {
                const atom = ref.getAtom(macho_file).?;
                const atom_alignment = atom.alignment.toByteUnits() orelse 1;
                // Place the atom at the next offset satisfying its alignment.
                const offset = mem.alignForward(u64, header.size, atom_alignment);
                const padding = offset - header.size;
                atom.value = offset;
                header.size += padding + atom.size;
                header.@"align" = @max(header.@"align", atom.alignment.toLog2Units());
            }
        }
    }.doWork;
    const slice = self.sections.slice();
    const header = &slice.items(.header)[sect_id];
    const atoms = slice.items(.atoms)[sect_id].items;
    doWork(self, header, atoms) catch |err| {
        // Fix: dropped the stray `try` — errors cannot propagate from a
        // void-returning function; report via diags like the sibling workers.
        diags.addError("failed to calculate size of section '{s},{s}': {s}", .{
            header.segName(), header.sectName(), @errorName(err),
        });
    };
}
2052
/// Builds range-extension thunks for code section `sect_id` (which also sizes
/// the section). Failures are routed to the diagnostics sink since this
/// helper, like its siblings, returns `void`.
fn createThunksWorker(self: *MachO, sect_id: u8) void {
    const tracy = trace(@src());
    defer tracy.end();
    self.createThunks(sect_id) catch |err| {
        const diags = &self.base.comp.link_diags;
        const header = self.sections.items(.header)[sect_id];
        diags.addError("failed to create thunks and calculate size of section '{s},{s}': {s}", .{
            header.segName(), header.sectName(), @errorName(err),
        });
    };
}
2064
/// Sizes the __eh_frame section and generates + sizes the compact
/// __unwind_info section, when either is present in the output.
fn generateUnwindInfo(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;
    const headers = self.sections.items(.header);

    if (self.eh_frame_sect_index) |sect_id| {
        const sect = &headers[sect_id];
        sect.size = try eh_frame.calcSize(self);
        sect.@"align" = 3;
    }
    if (self.unwind_info_sect_index) |sect_id| {
        const sect = &headers[sect_id];
        self.unwind_info.generate(self) catch |err| switch (err) {
            // Compact unwind encoding has a hard personality limit; surface a
            // user-facing link failure rather than a generic error.
            error.TooManyPersonalities => return diags.fail("too many personalities in unwind info", .{}),
            else => |e| return e,
        };
        sect.size = self.unwind_info.calcSize();
        sect.@"align" = 2;
    }
}
2086
/// Creates all load segments (__PAGEZERO, __TEXT, one per distinct section
/// segment name, __LINKEDIT), sorts them into canonical order, remaps the
/// cached segment indices, and attaches each section to its segment.
fn initSegments(self: *MachO) !void {
    const gpa = self.base.comp.gpa;
    const slice = self.sections.slice();

    // Add __PAGEZERO if required
    const pagezero_size = self.pagezero_size orelse default_pagezero_size;
    const aligned_pagezero_size = mem.alignBackward(u64, pagezero_size, self.getPageSize());
    if (!self.base.isDynLib() and aligned_pagezero_size > 0) {
        if (aligned_pagezero_size != pagezero_size) {
            // TODO convert into a warning
            log.warn("requested __PAGEZERO size (0x{x}) is not page aligned", .{pagezero_size});
            log.warn("  rounding down to 0x{x}", .{aligned_pagezero_size});
        }
        self.pagezero_seg_index = try self.addSegment("__PAGEZERO", .{ .vmsize = aligned_pagezero_size });
    }

    // __TEXT segment is non-optional
    self.text_seg_index = try self.addSegment("__TEXT", .{ .prot = getSegmentProt("__TEXT") });

    // Next, create segments required by sections
    for (slice.items(.header)) |header| {
        const segname = header.segName();
        if (self.getSegmentByName(segname) == null) {
            _ = try self.addSegment(segname, .{ .prot = getSegmentProt(segname) });
        }
    }

    // Add __LINKEDIT
    self.linkedit_seg_index = try self.addSegment("__LINKEDIT", .{ .prot = getSegmentProt("__LINKEDIT") });

    // Sort segments
    const Entry = struct {
        index: u8,

        pub fn lessThan(macho_file: *MachO, lhs: @This(), rhs: @This()) bool {
            return segmentLessThan(
                {},
                macho_file.segments.items[lhs.index].segName(),
                macho_file.segments.items[rhs.index].segName(),
            );
        }
    };

    var entries = try std.array_list.Managed(Entry).initCapacity(gpa, self.segments.items.len);
    defer entries.deinit();
    for (0..self.segments.items.len) |index| {
        entries.appendAssumeCapacity(.{ .index = @intCast(index) });
    }

    mem.sort(Entry, entries.items, self, Entry.lessThan);

    // backlinks[old_index] == new_index after the sort.
    const backlinks = try gpa.alloc(u8, entries.items.len);
    defer gpa.free(backlinks);
    for (entries.items, 0..) |entry, i| {
        backlinks[entry.index] = @intCast(i);
    }

    const segments = try self.segments.toOwnedSlice(gpa);
    defer gpa.free(segments);

    try self.segments.ensureTotalCapacityPrecise(gpa, segments.len);
    for (entries.items) |sorted| {
        self.segments.appendAssumeCapacity(segments[sorted.index]);
    }

    // Remap cached segment indices to their post-sort positions.
    for (&[_]*?u8{
        &self.pagezero_seg_index,
        &self.text_seg_index,
        &self.linkedit_seg_index,
        &self.zig_text_seg_index,
        &self.zig_const_seg_index,
        &self.zig_data_seg_index,
        &self.zig_bss_seg_index,
    }) |maybe_index| {
        if (maybe_index.*) |*index| {
            index.* = backlinks[index.*];
        }
    }

    // Attach sections to segments
    for (slice.items(.header), slice.items(.segment_id)) |header, *seg_id| {
        const segname = header.segName();
        // The orelse arm is a fallback: every section segment should already
        // have been created by the loop above.
        const segment_id = self.getSegmentByName(segname) orelse blk: {
            const segment_id = @as(u8, @intCast(self.segments.items.len));
            const protection = getSegmentProt(segname);
            try self.segments.append(gpa, .{
                .cmdsize = @sizeOf(macho.segment_command_64),
                .segname = makeStaticString(segname),
                .maxprot = protection,
                .initprot = protection,
            });
            break :blk segment_id;
        };
        // Account for the section in its segment's load command.
        const segment = &self.segments.items[segment_id];
        segment.cmdsize += @sizeOf(macho.section_64);
        segment.nsects += 1;
        seg_id.* = segment_id;
    }

    // Set __DATA_CONST as READ_ONLY
    if (self.getSegmentByName("__DATA_CONST")) |seg_id| {
        const seg = &self.segments.items[seg_id];
        seg.flags |= macho.SG_READ_ONLY;
    }
}
2192
/// Assigns virtual addresses and file offsets to all non-Zig sections,
/// walking them in sorted order after the Mach-O header + load commands pad.
/// Zig-managed sections (incrementally allocated elsewhere) are only moved if
/// the newly laid out sections would overlap them in the file.
fn allocateSections(self: *MachO) !void {
    const headerpad = try load_commands.calcMinHeaderPadSize(self);
    var vmaddr: u64 = if (self.pagezero_seg_index) |index|
        self.segments.items[index].vmaddr + self.segments.items[index].vmsize
    else
        0;
    vmaddr += headerpad;
    var fileoff = headerpad;
    var prev_seg_id: u8 = if (self.pagezero_seg_index) |index| index + 1 else 0;

    const page_size = self.getPageSize();
    const slice = self.sections.slice();
    // Sections are sorted, so the first Zig section marks the boundary between
    // regularly-allocated sections and the incrementally-managed tail.
    const last_index = for (0..slice.items(.header).len) |i| {
        if (self.isZigSection(@intCast(i))) break i;
    } else slice.items(.header).len;

    for (slice.items(.header)[0..last_index], slice.items(.segment_id)[0..last_index]) |*header, curr_seg_id| {
        // New segment: both address and file offset restart on a page boundary.
        if (prev_seg_id != curr_seg_id) {
            vmaddr = mem.alignForward(u64, vmaddr, page_size);
            fileoff = mem.alignForward(u32, fileoff, page_size);
        }

        const alignment = try self.alignPow(header.@"align");

        vmaddr = mem.alignForward(u64, vmaddr, alignment);
        header.addr = vmaddr;
        vmaddr += header.size;

        // Zerofill sections occupy address space but no file space.
        if (!header.isZerofill()) {
            fileoff = mem.alignForward(u32, fileoff, alignment);
            header.offset = fileoff;
            fileoff += @intCast(header.size);
        }

        prev_seg_id = curr_seg_id;
    }

    fileoff = mem.alignForward(u32, fileoff, page_size);
    // If any Zig section now sits below the end of the regular sections in the
    // file, relocate its contents to fresh free space.
    for (slice.items(.header)[last_index..], slice.items(.segment_id)[last_index..]) |*header, seg_id| {
        if (header.isZerofill()) continue;
        if (header.offset < fileoff) {
            const existing_size = header.size;
            // Temporarily zero the size so findFreeSpace does not consider the
            // section's current location as occupied by itself.
            header.size = 0;

            // Must move the entire section.
            const new_offset = try self.findFreeSpace(existing_size, page_size);

            log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
                header.segName(),
                header.sectName(),
                header.offset,
                new_offset,
            });

            try self.copyRangeAllZeroOut(header.offset, new_offset, existing_size);

            header.offset = @intCast(new_offset);
            header.size = existing_size;
            self.segments.items[seg_id].fileoff = new_offset;
        }
    }
}
2255
/// We allocate segments in a separate step to also consider segments that have no sections.
/// Derives each segment's vmaddr/fileoff/vmsize/filesize from the addresses
/// already assigned to its sections; assumes sections are sorted so each
/// segment's sections are contiguous and in segment order.
fn allocateSegments(self: *MachO) void {
    const first_index = if (self.pagezero_seg_index) |index| index + 1 else 0;
    // Zig-managed segments (the tail of the sorted list) keep their existing
    // incremental allocation and are excluded here.
    const last_index = for (0..self.segments.items.len) |i| {
        if (self.isZigSegment(@intCast(i))) break i;
    } else self.segments.items.len;

    var vmaddr: u64 = if (self.pagezero_seg_index) |index|
        self.segments.items[index].vmaddr + self.segments.items[index].vmsize
    else
        0;
    var fileoff: u64 = 0;

    const page_size = self.getPageSize();
    const slice = self.sections.slice();

    var next_sect_id: u8 = 0;
    for (self.segments.items[first_index..last_index], first_index..last_index) |*seg, seg_id| {
        seg.vmaddr = vmaddr;
        seg.fileoff = fileoff;

        // Consume this segment's run of sections, extending vmaddr/fileoff to
        // cover them.
        while (next_sect_id < slice.items(.header).len) : (next_sect_id += 1) {
            const header = slice.items(.header)[next_sect_id];
            const sid = slice.items(.segment_id)[next_sect_id];

            if (seg_id != sid) break;

            vmaddr = header.addr + header.size;
            // Zerofill sections contribute to vmsize but not filesize.
            if (!header.isZerofill()) {
                fileoff = header.offset + header.size;
            }
        }

        seg.vmsize = vmaddr - seg.vmaddr;
        seg.filesize = fileoff - seg.fileoff;

        // Next segment starts on a page boundary in both address and file space.
        vmaddr = mem.alignForward(u64, vmaddr, page_size);
        fileoff = mem.alignForward(u64, fileoff, page_size);
    }
}
2296
/// Assigns final addresses to linker-synthesized symbols: the internal
/// object's own synthetic symbols, `segment$start$`/`segment$end$` and
/// `section$start$`/`section$end$` boundary symbols, and per-entry
/// Objective-C stub symbols.
fn allocateSyntheticSymbols(self: *MachO) void {
    if (self.getInternalObject()) |obj| {
        obj.allocateSyntheticSymbols(self);

        const text_seg = self.getTextSegment();

        for (obj.boundary_symbols.items) |sym_index| {
            const ref = obj.getSymbolRef(sym_index, self);
            const sym = ref.getSymbol(self).?;
            const name = sym.getName(self);

            // Default when the named segment/section does not exist in the output.
            sym.value = text_seg.vmaddr;

            if (mem.startsWith(u8, name, "segment$start$")) {
                const segname = name["segment$start$".len..];
                if (self.getSegmentByName(segname)) |seg_id| {
                    const seg = self.segments.items[seg_id];
                    sym.value = seg.vmaddr;
                }
            } else if (mem.startsWith(u8, name, "segment$end$")) {
                const segname = name["segment$end$".len..];
                if (self.getSegmentByName(segname)) |seg_id| {
                    const seg = self.segments.items[seg_id];
                    sym.value = seg.vmaddr + seg.vmsize;
                }
            } else if (mem.startsWith(u8, name, "section$start$")) {
                // Name format: section$start$<segname>$<sectname>
                const actual_name = name["section$start$".len..];
                const sep = mem.indexOfScalar(u8, actual_name, '$').?; // TODO error rather than a panic
                const segname = actual_name[0..sep];
                const sectname = actual_name[sep + 1 ..];
                if (self.getSectionByName(segname, sectname)) |sect_id| {
                    const sect = self.sections.items(.header)[sect_id];
                    sym.value = sect.addr;
                    sym.out_n_sect = sect_id;
                }
            } else if (mem.startsWith(u8, name, "section$end$")) {
                const actual_name = name["section$end$".len..];
                const sep = mem.indexOfScalar(u8, actual_name, '$').?; // TODO error rather than a panic
                const segname = actual_name[0..sep];
                const sectname = actual_name[sep + 1 ..];
                if (self.getSectionByName(segname, sectname)) |sect_id| {
                    const sect = self.sections.items(.header)[sect_id];
                    sym.value = sect.addr + sect.size;
                    sym.out_n_sect = sect_id;
                }
            } else unreachable;
        }

        // Objective-C stub symbols are laid out back-to-back at a fixed entry size.
        if (self.objc_stubs.symbols.items.len > 0) {
            const addr = self.sections.items(.header)[self.objc_stubs_sect_index.?].addr;

            for (self.objc_stubs.symbols.items, 0..) |ref, idx| {
                const sym = ref.getSymbol(self).?;
                sym.value = addr + idx * ObjcStubsSection.entrySize(self.getTarget().cpu.arch);
                sym.out_n_sect = self.objc_stubs_sect_index.?;
            }
        }
    }
}
2356
/// Places the __LINKEDIT segment after every other segment and lays out all
/// linkedit payloads inside it in fixed order: dyld info (rebase, bind, weak
/// bind, lazy bind, export trie), function starts, data-in-code, symtab,
/// indirect symtab, and finally the string table. The per-command offsets are
/// written into the corresponding load commands.
fn allocateLinkeditSegment(self: *MachO) !void {
    var fileoff: u64 = 0;
    var vmaddr: u64 = 0;

    // __LINKEDIT begins past the furthest extent of all existing segments.
    for (self.segments.items) |seg| {
        if (fileoff < seg.fileoff + seg.filesize) fileoff = seg.fileoff + seg.filesize;
        if (vmaddr < seg.vmaddr + seg.vmsize) vmaddr = seg.vmaddr + seg.vmsize;
    }

    const page_size = self.getPageSize();
    const seg = self.getLinkeditSegment();
    seg.vmaddr = mem.alignForward(u64, vmaddr, page_size);
    seg.fileoff = mem.alignForward(u64, fileoff, page_size);

    var off = try self.cast(u32, seg.fileoff);
    // DYLD_INFO_ONLY
    {
        const cmd = &self.dyld_info_cmd;
        cmd.rebase_off = off;
        off += cmd.rebase_size;
        cmd.bind_off = off;
        off += cmd.bind_size;
        cmd.weak_bind_off = off;
        off += cmd.weak_bind_size;
        cmd.lazy_bind_off = off;
        off += cmd.lazy_bind_size;
        cmd.export_off = off;
        off += cmd.export_size;
        off = mem.alignForward(u32, off, @alignOf(u64));
    }

    // FUNCTION_STARTS
    {
        const cmd = &self.function_starts_cmd;
        cmd.dataoff = off;
        off += cmd.datasize;
        off = mem.alignForward(u32, off, @alignOf(u64));
    }

    // DATA_IN_CODE
    {
        const cmd = &self.data_in_code_cmd;
        cmd.dataoff = off;
        off += cmd.datasize;
        off = mem.alignForward(u32, off, @alignOf(u64));
    }

    // SYMTAB (symtab)
    {
        const cmd = &self.symtab_cmd;
        cmd.symoff = off;
        off += cmd.nsyms * @sizeOf(macho.nlist_64);
        off = mem.alignForward(u32, off, @alignOf(u32));
    }

    // DYSYMTAB
    {
        const cmd = &self.dysymtab_cmd;
        cmd.indirectsymoff = off;
        off += cmd.nindirectsyms * @sizeOf(u32);
        off = mem.alignForward(u32, off, @alignOf(u64));
    }

    // SYMTAB (strtab)
    {
        const cmd = &self.symtab_cmd;
        cmd.stroff = off;
        off += cmd.strsize;
    }

    seg.filesize = off - seg.fileoff;
}
2429
/// Resizes each output section buffer to its final on-disk size and pre-fills
/// it with padding: 0xcc (int3) for x86_64 code sections, zeroes otherwise.
/// Zerofill and Zig-managed sections are skipped.
fn resizeSections(self: *MachO) !void {
    const gpa = self.base.comp.gpa;
    // Hoisted out of the loop: the target architecture is loop-invariant.
    const cpu_arch = self.getTarget().cpu.arch;
    const slice = self.sections.slice();
    for (slice.items(.header), slice.items(.out), 0..) |header, *out, n_sect| {
        if (header.isZerofill()) continue;
        if (self.isZigSection(@intCast(n_sect))) continue; // TODO this is horrible
        const size = try self.cast(usize, header.size);
        try out.resize(gpa, size);
        const padding_byte: u8 = if (header.isCode() and cpu_arch == .x86_64) 0xcc else 0;
        @memset(out.items, padding_byte);
    }
}
2442
/// Writes all section contents into the in-memory output buffers (atoms,
/// thunks, synthetic sections) and computes the sizes of the linkedit
/// payloads (rebase/bind/lazy-bind/export trie/data-in-code) plus the final
/// symtab/strtab contents. Fails if any worker reported a diagnostic.
fn writeSectionsAndUpdateLinkeditSizes(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;
    const diags = &self.base.comp.link_diags;

    // Pre-size symtab/strtab from the counts computed in calcSymtabSize;
    // index 0 of the string table is the mandatory empty string.
    const cmd = self.symtab_cmd;
    try self.symtab.resize(gpa, cmd.nsyms);
    try self.strtab.resize(gpa, cmd.strsize);
    self.strtab.items[0] = 0;

    {
        for (self.objects.items) |index| {
            writeAtomsWorker(self, self.getFile(index).?);
        }
        if (self.getZigObject()) |zo| {
            writeAtomsWorker(self, zo.asFile());
        }
        if (self.getInternalObject()) |obj| {
            writeAtomsWorker(self, obj.asFile());
        }
        for (self.thunks.items) |thunk| {
            writeThunkWorker(self, thunk);
        }

        const slice = self.sections.slice();
        for (&[_]?u8{
            self.eh_frame_sect_index,
            self.unwind_info_sect_index,
            self.got_sect_index,
            self.stubs_sect_index,
            self.la_symbol_ptr_sect_index,
            self.tlv_ptr_sect_index,
            self.objc_stubs_sect_index,
        }) |maybe_sect_id| {
            if (maybe_sect_id) |sect_id| {
                const out = slice.items(.out)[sect_id].items;
                writeSyntheticSectionWorker(self, sect_id, out);
            }
        }

        // Lazy bind info also rewrites the stubs helper section contents.
        if (self.la_symbol_ptr_sect_index) |_| {
            updateLazyBindSizeWorker(self);
        }

        updateLinkeditSizeWorker(self, .rebase);
        updateLinkeditSizeWorker(self, .bind);
        updateLinkeditSizeWorker(self, .weak_bind);
        updateLinkeditSizeWorker(self, .export_trie);
        updateLinkeditSizeWorker(self, .data_in_code);

        if (self.getZigObject()) |zo| {
            File.writeSymtab(zo.asFile(), self, self);
        }
        for (self.objects.items) |index| {
            File.writeSymtab(self.getFile(index).?, self, self);
        }
        for (self.dylibs.items) |index| {
            File.writeSymtab(self.getFile(index).?, self, self);
        }
        if (self.getInternalObject()) |obj| {
            File.writeSymtab(obj.asFile(), self, self);
        }
        if (self.requiresThunks()) for (self.thunks.items) |th| {
            Thunk.writeSymtab(th, self, self);
        };
    }

    // Workers report via diags; convert any accumulated error into a failure.
    if (diags.hasErrors()) return error.LinkFailure;
}
2514
/// Resolves relocations and writes `file`'s atoms into the output buffers.
/// Failures are recorded as parse errors against the file; the reporter's own
/// error is deliberately discarded.
fn writeAtomsWorker(self: *MachO, file: File) void {
    const tracy = trace(@src());
    defer tracy.end();
    file.writeAtoms(self) catch |err| {
        self.reportParseError2(
            file.getIndex(),
            "failed to resolve relocations and write atoms: {s}",
            .{@errorName(err)},
        ) catch {};
    };
}
2524
/// Writes a single range-extension thunk into its output section's buffer at
/// the thunk's assigned offset, reporting any failure via diagnostics.
fn writeThunkWorker(self: *MachO, thunk: Thunk) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    const emit = struct {
        fn emit(macho_file: *MachO, th: Thunk, buffer: []u8) !void {
            const start = try macho_file.cast(usize, th.value);
            var stream: Writer = .fixed(buffer[start..][0..th.size()]);
            try th.write(macho_file, &stream);
        }
    }.emit;
    const out = self.sections.items(.out)[thunk.out_n_sect].items;
    emit(self, thunk, out) catch |err| {
        diags.addError("failed to write contents of thunk: {s}", .{@errorName(err)});
    };
}
2544
/// Serializes one synthetic section (eh_frame, unwind info, GOT, stubs,
/// lazy/TLV pointers, or Objective-C stubs) into `out`, the section's
/// pre-sized output buffer. Errors are routed to the diagnostics sink.
fn writeSyntheticSectionWorker(self: *MachO, sect_id: u8, out: []u8) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    const Tag = enum {
        eh_frame,
        unwind_info,
        got,
        stubs,
        la_symbol_ptr,
        tlv_ptr,
        objc_stubs,
    };

    const header = self.sections.items(.header)[sect_id];

    // Identify which synthetic section this id refers to.
    const tag: Tag = tag: {
        if (self.eh_frame_sect_index) |id| if (id == sect_id) break :tag .eh_frame;
        if (self.unwind_info_sect_index) |id| if (id == sect_id) break :tag .unwind_info;
        if (self.got_sect_index) |id| if (id == sect_id) break :tag .got;
        if (self.stubs_sect_index) |id| if (id == sect_id) break :tag .stubs;
        if (self.la_symbol_ptr_sect_index) |id| if (id == sect_id) break :tag .la_symbol_ptr;
        if (self.tlv_ptr_sect_index) |id| if (id == sect_id) break :tag .tlv_ptr;
        if (self.objc_stubs_sect_index) |id| if (id == sect_id) break :tag .objc_stubs;
        unreachable; // caller only passes known synthetic section ids
    };

    const emit = struct {
        fn emit(macho_file: *MachO, which: Tag, buffer: []u8) !void {
            var stream: Writer = .fixed(buffer);
            switch (which) {
                .eh_frame => eh_frame.write(macho_file, buffer),
                .unwind_info => try macho_file.unwind_info.write(macho_file, buffer),
                .got => try macho_file.got.write(macho_file, &stream),
                .stubs => try macho_file.stubs.write(macho_file, &stream),
                .la_symbol_ptr => try macho_file.la_symbol_ptr.write(macho_file, &stream),
                .tlv_ptr => try macho_file.tlv_ptr.write(macho_file, &stream),
                .objc_stubs => try macho_file.objc_stubs.write(macho_file, &stream),
            }
        }
    }.emit;

    emit(self, tag, out) catch |err| {
        diags.addError("could not write section '{s},{s}': {s}", .{
            header.segName(), header.sectName(), @errorName(err),
        });
    };
}
2600
/// Computes the lazy-bind linkedit payload size and then rewrites the stubs
/// helper section, whose contents embed the lazy-bind offsets just computed.
fn updateLazyBindSizeWorker(self: *MachO) void {
    const tracy = trace(@src());
    defer tracy.end();

    const diags = &self.base.comp.link_diags;

    const update = struct {
        fn update(macho_file: *MachO) !void {
            try macho_file.lazy_bind_section.updateSize(macho_file);
            const sect_id = macho_file.stubs_helper_sect_index.?;
            const out = &macho_file.sections.items(.out)[sect_id];
            var stream: Writer = .fixed(out.items);
            try macho_file.stubs_helper.write(macho_file, &stream);
        }
    }.update;
    update(self) catch |err|
        diags.addError("could not calculate size of lazy binding section: {s}", .{@errorName(err)});
}
2619
/// Computes the size of one linkedit payload selected by `tag`, reporting any
/// failure through the diagnostics sink.
pub fn updateLinkeditSizeWorker(self: *MachO, tag: enum {
    rebase,
    bind,
    weak_bind,
    export_trie,
    data_in_code,
}) void {
    const result = switch (tag) {
        .rebase => self.rebase_section.updateSize(self),
        .bind => self.bind_section.updateSize(self),
        .weak_bind => self.weak_bind_section.updateSize(self),
        .export_trie => self.export_trie.updateSize(self),
        .data_in_code => self.data_in_code.updateSize(self),
    };
    result catch |err| {
        const diags = &self.base.comp.link_diags;
        diags.addError("could not calculate size of {s} section: {s}", .{ @tagName(tag), @errorName(err) });
    };
}
2638
/// Flushes every section's in-memory output buffer to the output file at the
/// section's assigned file offset.
fn writeSectionsToFile(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const slice = self.sections.slice();
    for (slice.items(.out), slice.items(.header)) |out, header| {
        try self.pwriteAll(out.items, header.offset);
    }
}
2648
/// Writes all __LINKEDIT payloads to the output file: dyld info,
/// data-in-code, symtab/strtab, and the indirect symbol table.
fn writeLinkeditSectionsToFile(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();
    try self.writeDyldInfo();
    try self.writeDataInCode();
    try self.writeSymtabToFile();
    try self.writeIndsymtab();
}
2657
/// Serializes the dyld info payloads (rebase, bind, weak bind, lazy bind,
/// export trie) into one zero-initialized buffer and writes it at rebase_off.
/// Each sub-payload's position inside the buffer is its load-command offset
/// relative to the start of __LINKEDIT, so inter-payload alignment padding
/// stays zeroed.
fn writeDyldInfo(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;
    const base_off = self.getLinkeditSegment().fileoff;
    const cmd = self.dyld_info_cmd;
    var needed_size: u32 = 0;
    needed_size += cmd.rebase_size;
    needed_size += cmd.bind_size;
    needed_size += cmd.weak_bind_size;
    needed_size += cmd.lazy_bind_size;
    needed_size += cmd.export_size;

    const buffer = try gpa.alloc(u8, needed_size);
    defer gpa.free(buffer);
    @memset(buffer, 0);

    var writer: Writer = .fixed(buffer);

    try self.rebase_section.write(&writer);
    // Manually advance the fixed writer's cursor to each payload's
    // buffer-relative offset before writing the next payload.
    writer.end = @intCast(cmd.bind_off - base_off);
    try self.bind_section.write(&writer);
    writer.end = @intCast(cmd.weak_bind_off - base_off);
    try self.weak_bind_section.write(&writer);
    writer.end = @intCast(cmd.lazy_bind_off - base_off);
    try self.lazy_bind_section.write(&writer);
    writer.end = @intCast(cmd.export_off - base_off);
    try self.export_trie.write(&writer);
    try self.pwriteAll(buffer, cmd.rebase_off);
}
2689
/// Serializes the data-in-code entries and writes them at the offset recorded
/// in the DATA_IN_CODE load command.
pub fn writeDataInCode(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const gpa = self.base.comp.gpa;
    const cmd = self.data_in_code_cmd;
    var aw = try std.Io.Writer.Allocating.initCapacity(gpa, self.data_in_code.size());
    defer aw.deinit();
    // The buffer is pre-sized; the only possible write failure is allocation.
    self.data_in_code.write(self, &aw.writer) catch return error.OutOfMemory;
    try self.pwriteAll(aw.written(), cmd.dataoff);
}
2700
/// Serializes the indirect symbol table (one u32 per entry) and writes it at
/// the offset recorded in the dysymtab load command.
fn writeIndsymtab(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const gpa = self.base.comp.gpa;
    const cmd = self.dysymtab_cmd;
    const buf = try gpa.alloc(u8, cmd.nindirectsyms * @sizeOf(u32));
    defer gpa.free(buf);
    var writer: Writer = .fixed(buf);
    try self.indsymtab.write(self, &writer);
    try self.pwriteAll(buf, cmd.indirectsymoff);
}
2713
/// Writes the already-populated symbol table and string table to the file at
/// the offsets recorded in the SYMTAB load command.
pub fn writeSymtabToFile(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();
    const symtab_cmd = self.symtab_cmd;
    // nlist_64 entries first, then the string table they index into.
    try self.pwriteAll(@ptrCast(self.symtab.items), symtab_cmd.symoff);
    try self.pwriteAll(self.strtab.items, symtab_cmd.stroff);
}
2721
/// Serializes __eh_frame and __unwind_info (when present) into temporary
/// buffers and writes them to the file at their section offsets.
fn writeUnwindInfo(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;
    const headers = self.sections.items(.header);

    if (self.eh_frame_sect_index) |sect_id| {
        const header = headers[sect_id];
        const buf = try gpa.alloc(u8, try self.cast(usize, header.size));
        defer gpa.free(buf);
        eh_frame.write(self, buf);
        try self.pwriteAll(buf, header.offset);
    }

    if (self.unwind_info_sect_index) |sect_id| {
        const header = headers[sect_id];
        const buf = try gpa.alloc(u8, try self.cast(usize, header.size));
        defer gpa.free(buf);
        try self.unwind_info.write(self, buf);
        try self.pwriteAll(buf, header.offset);
    }
}
2746
/// Computes global symbol table layout: assigns each input file (and thunk)
/// its starting indices into the locals/stabs/exports/imports partitions and
/// its string-table offset, then fills in the SYMTAB and DYSYMTAB load
/// command counts. Output order is locals, stabs, exports, imports.
fn calcSymtabSize(self: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = self.base.comp.gpa;

    // Collect all contributing files in output order.
    var files = std.array_list.Managed(File.Index).init(gpa);
    defer files.deinit();
    try files.ensureTotalCapacityPrecise(self.objects.items.len + self.dylibs.items.len + 2);
    if (self.zig_object) |index| files.appendAssumeCapacity(index);
    for (self.objects.items) |index| files.appendAssumeCapacity(index);
    for (self.dylibs.items) |index| files.appendAssumeCapacity(index);
    if (self.internal_object) |index| files.appendAssumeCapacity(index);

    var nlocals: u32 = 0;
    var nstabs: u32 = 0;
    var nexports: u32 = 0;
    var nimports: u32 = 0;
    // Offset 1: strtab[0] is the mandatory empty string.
    var strsize: u32 = 1;

    // Thunk symbols are locals and come first.
    if (self.requiresThunks()) for (self.thunks.items) |*th| {
        th.output_symtab_ctx.ilocal = nlocals;
        th.output_symtab_ctx.stroff = strsize;
        th.calcSymtabSize(self);
        nlocals += th.output_symtab_ctx.nlocals;
        strsize += th.output_symtab_ctx.strsize;
    };

    // First pass: assign each file's starting index within each partition.
    for (files.items) |index| {
        const file = self.getFile(index).?;
        const ctx = switch (file) {
            inline else => |x| &x.output_symtab_ctx,
        };
        ctx.ilocal = nlocals;
        ctx.istab = nstabs;
        ctx.iexport = nexports;
        ctx.iimport = nimports;
        ctx.stroff = strsize;
        nlocals += ctx.nlocals;
        nstabs += ctx.nstabs;
        nexports += ctx.nexports;
        nimports += ctx.nimports;
        strsize += ctx.strsize;
    }

    // Second pass: convert partition-relative indices into absolute symtab
    // indices (stabs follow locals, exports follow stabs, imports last).
    for (files.items) |index| {
        const file = self.getFile(index).?;
        const ctx = switch (file) {
            inline else => |x| &x.output_symtab_ctx,
        };
        ctx.istab += nlocals;
        ctx.iexport += nlocals + nstabs;
        ctx.iimport += nlocals + nstabs + nexports;
    }

    try self.indsymtab.updateSize(self);

    {
        const cmd = &self.symtab_cmd;
        cmd.nsyms = nlocals + nstabs + nexports + nimports;
        cmd.strsize = strsize;
    }

    {
        // DYSYMTAB counts stabs as locals.
        const cmd = &self.dysymtab_cmd;
        cmd.ilocalsym = 0;
        cmd.nlocalsym = nlocals + nstabs;
        cmd.iextdefsym = nlocals + nstabs;
        cmd.nextdefsym = nexports;
        cmd.iundefsym = nlocals + nstabs + nexports;
        cmd.nundefsym = nimports;
    }
}
2820
/// Serializes all load commands into a scratch buffer and writes the buffer
/// to the output file immediately after the mach_header_64.
/// Returns: the number of commands written, the total byte size of the
/// commands, and the file offset of the LC_UUID command so that the UUID can
/// be patched in later (see `writeUuid`).
fn writeLoadCommands(self: *MachO) !struct { usize, usize, u64 } {
    const comp = self.base.comp;
    const gpa = comp.gpa;
    const needed_size = try load_commands.calcLoadCommandsSize(self, false);
    const buffer = try gpa.alloc(u8, needed_size);
    defer gpa.free(buffer);

    var writer: Writer = .fixed(buffer);

    var ncmds: usize = 0;

    // Segment and section load commands
    {
        // Sections are stored contiguously per segment; `sect_id` tracks the
        // running start index of each segment's section headers.
        const slice = self.sections.slice();
        var sect_id: usize = 0;
        for (self.segments.items) |seg| {
            try writer.writeStruct(seg, .little);
            for (slice.items(.header)[sect_id..][0..seg.nsects]) |header| {
                try writer.writeStruct(header, .little);
            }
            sect_id += seg.nsects;
        }
        ncmds += self.segments.items.len;
    }

    try writer.writeStruct(self.dyld_info_cmd, .little);
    ncmds += 1;
    try writer.writeStruct(self.function_starts_cmd, .little);
    ncmds += 1;
    try writer.writeStruct(self.data_in_code_cmd, .little);
    ncmds += 1;
    try writer.writeStruct(self.symtab_cmd, .little);
    ncmds += 1;
    try writer.writeStruct(self.dysymtab_cmd, .little);
    ncmds += 1;
    try load_commands.writeDylinkerLC(&writer);
    ncmds += 1;

    // LC_MAIN: only emitted for executables that have a resolved entry point.
    if (self.getInternalObject()) |obj| {
        if (obj.getEntryRef(self)) |ref| {
            const sym = ref.getSymbol(self).?;
            const seg = self.getTextSegment();
            // An entry symbol without a file resolves to offset 0.
            const entryoff: u32 = if (sym.getFile(self) == null)
                0
            else
                @as(u32, @intCast(sym.getAddress(.{ .stubs = true }, self) - seg.vmaddr));
            try writer.writeStruct(@as(macho.entry_point_command, .{
                .entryoff = entryoff,
                .stacksize = self.base.stack_size,
            }), .little);
            ncmds += 1;
        }
    }

    if (self.base.isDynLib()) {
        try load_commands.writeDylibIdLC(self, &writer);
        ncmds += 1;
    }

    for (self.rpath_list) |rpath| {
        try load_commands.writeRpathLC(rpath, &writer);
        ncmds += 1;
    }
    // TSan runtime is a dylib; add its directory as an rpath so it is found
    // at load time.
    if (comp.config.any_sanitize_thread) {
        const path = try comp.tsan_lib.?.full_object_path.toString(gpa);
        defer gpa.free(path);
        const rpath = std.fs.path.dirname(path) orelse ".";
        try load_commands.writeRpathLC(rpath, &writer);
        ncmds += 1;
    }

    try writer.writeStruct(@as(macho.source_version_command, .{ .version = 0 }), .little);
    ncmds += 1;

    if (self.platform.isBuildVersionCompatible()) {
        try load_commands.writeBuildVersionLC(self.platform, self.sdk_version, &writer);
        ncmds += 1;
    } else {
        try load_commands.writeVersionMinLC(self.platform, self.sdk_version, &writer);
        ncmds += 1;
    }

    // Record where LC_UUID lands; the UUID itself is computed and patched in
    // after the rest of the file has been written.
    const uuid_cmd_offset = @sizeOf(macho.mach_header_64) + writer.end;
    try writer.writeStruct(self.uuid_cmd, .little);
    ncmds += 1;

    for (self.dylibs.items) |index| {
        const dylib = self.getFile(index).?.dylib;
        assert(dylib.isAlive(self));
        const dylib_id = dylib.id.?;
        try load_commands.writeDylibLC(.{
            .cmd = if (dylib.weak)
                .LOAD_WEAK_DYLIB
            else if (dylib.reexport)
                .REEXPORT_DYLIB
            else
                .LOAD_DYLIB,
            .name = dylib_id.name,
            .timestamp = dylib_id.timestamp,
            .current_version = dylib_id.current_version,
            .compatibility_version = dylib_id.compatibility_version,
        }, &writer);
        ncmds += 1;
    }

    if (self.requiresCodeSig()) {
        try writer.writeStruct(self.codesig_cmd, .little);
        ncmds += 1;
    }

    // The size precomputed by calcLoadCommandsSize must match exactly.
    assert(writer.end == needed_size);

    try self.pwriteAll(buffer, @sizeOf(macho.mach_header_64));

    return .{ ncmds, buffer.len, uuid_cmd_offset };
}
2937
/// Writes the mach_header_64 at offset 0, deriving cputype, filetype and
/// header flags from the link configuration and atomically-collected state.
fn writeHeader(self: *MachO, ncmds: usize, sizeofcmds: usize) !void {
    var header: macho.mach_header_64 = .{};

    switch (self.getTarget().cpu.arch) {
        .aarch64 => {
            header.cputype = macho.CPU_TYPE_ARM64;
            header.cpusubtype = macho.CPU_SUBTYPE_ARM_ALL;
        },
        .x86_64 => {
            header.cputype = macho.CPU_TYPE_X86_64;
            header.cpusubtype = macho.CPU_SUBTYPE_X86_64_ALL;
        },
        else => {},
    }

    header.filetype = if (self.base.isDynLib()) macho.MH_DYLIB else macho.MH_EXECUTE;

    var flags: u32 = macho.MH_NOUNDEFS | macho.MH_DYLDLINK;
    // TODO: if (self.options.namespace == .two_level) {
    flags |= macho.MH_TWOLEVEL;
    // }
    if (!self.base.isDynLib()) flags |= macho.MH_PIE;

    const has_reexports = for (self.dylibs.items) |index| {
        if (self.getFile(index).?.dylib.reexport) break true;
    } else false;
    if (!has_reexports) flags |= macho.MH_NO_REEXPORTED_DYLIBS;

    if (self.has_tlv.load(.seq_cst)) flags |= macho.MH_HAS_TLV_DESCRIPTORS;
    if (self.binds_to_weak.load(.seq_cst)) flags |= macho.MH_BINDS_TO_WEAK;
    if (self.weak_defines.load(.seq_cst)) flags |= macho.MH_WEAK_DEFINES;
    header.flags = flags;

    header.ncmds = @intCast(ncmds);
    header.sizeofcmds = @intCast(sizeofcmds);

    log.debug("writing Mach-O header {}", .{header});

    try self.pwriteAll(mem.asBytes(&header), 0);
}
2989
/// Computes the binary's UUID via `calcUuid` over the written file contents
/// and patches it into the already-emitted LC_UUID command at
/// `uuid_cmd_offset` (as returned by `writeLoadCommands`).
fn writeUuid(self: *MachO, uuid_cmd_offset: u64, has_codesig: bool) !void {
    // Hash everything up to (but excluding) the code signature data, since
    // the signature is written after the UUID and covers it.
    const file_size = if (!has_codesig) blk: {
        const seg = self.getLinkeditSegment();
        break :blk seg.fileoff + seg.filesize;
    } else self.codesig_cmd.dataoff;
    try calcUuid(self.base.comp, self.base.file.?, file_size, &self.uuid_cmd.uuid);
    // Skip the generic load_command header to land on the uuid payload.
    const offset = uuid_cmd_offset + @sizeOf(macho.load_command);
    try self.pwriteAll(&self.uuid_cmd.uuid, offset);
}
2999
/// Reserves (and zero-extends the file to cover) space at the end of the
/// __LINKEDIT segment for the code signature, recording its file offset and
/// size in `codesig_cmd`. The signature itself is written later by
/// `writeCodeSignature`.
pub fn writeCodeSignaturePadding(self: *MachO, code_sig: *CodeSignature) !void {
    const seg = self.getLinkeditSegment();
    // Code signature data has to be 16-bytes aligned for Apple tools to recognize the file
    // https://github.com/opensource-apple/cctools/blob/fdb4825f303fd5c0751be524babd32958181b3ed/libstuff/checkout.c#L271
    const offset = mem.alignForward(u64, seg.fileoff + seg.filesize, 16);
    const needed_size = code_sig.estimateSize(offset);
    seg.filesize = offset + needed_size - seg.fileoff;
    seg.vmsize = mem.alignForward(u64, seg.filesize, self.getPageSize());
    log.debug("writing code signature padding from 0x{x} to 0x{x}", .{ offset, offset + needed_size });
    // Pad out the space. We need to do this to calculate valid hashes for everything in the file
    // except for code signature data.
    try self.pwriteAll(&[_]u8{0}, offset + needed_size - 1);

    self.codesig_cmd.dataoff = @as(u32, @intCast(offset));
    self.codesig_cmd.datasize = @as(u32, @intCast(needed_size));
}
3016
/// Generates the ad-hoc code signature and writes it into the space reserved
/// by `writeCodeSignaturePadding` (offset/size taken from `codesig_cmd`).
pub fn writeCodeSignature(self: *MachO, code_sig: *CodeSignature) !void {
    const seg = self.getTextSegment();
    const offset = self.codesig_cmd.dataoff;
    const gpa = self.base.comp.gpa;

    var buffer: std.Io.Writer.Allocating = .init(gpa);
    defer buffer.deinit();
    // The writeAdhocSignature function internally changes code_sig.size()
    // during the execution.
    try buffer.ensureUnusedCapacity(code_sig.size());

    code_sig.writeAdhocSignature(self, .{
        .file = self.base.file.?,
        .exec_seg_base = seg.fileoff,
        .exec_seg_limit = seg.filesize,
        .file_size = offset,
        .dylib = self.base.isDynLib(),
    }, &buffer.writer) catch |err| switch (err) {
        // An Allocating writer's write failure maps back to allocation
        // failure for our callers.
        error.WriteFailed => return error.OutOfMemory,
        else => |e| return e,
    };
    assert(buffer.written().len == code_sig.size());

    log.debug("writing code signature from 0x{x} to 0x{x}", .{
        offset,
        offset + buffer.written().len,
    });

    try self.pwriteAll(buffer.written(), offset);
}
3047
/// Lowers a function's generated machine code into the output.
/// Delegates to the ZigObject; asserts one exists (incremental compilation).
pub fn updateFunc(
    self: *MachO,
    pt: Zcu.PerThread,
    func_index: InternPool.Index,
    mir: *const codegen.AnyMir,
) link.File.UpdateNavError!void {
    if (build_options.skip_non_native and builtin.object_format != .macho) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    return self.getZigObject().?.updateFunc(self, pt, func_index, mir);
}
3059
/// Lowers a non-function declaration (nav) into the output.
/// Delegates to the ZigObject; asserts one exists.
pub fn updateNav(self: *MachO, pt: Zcu.PerThread, nav: InternPool.Nav.Index) link.File.UpdateNavError!void {
    if (build_options.skip_non_native and builtin.object_format != .macho) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    return self.getZigObject().?.updateNav(self, pt, nav);
}
3066
/// Updates debug line information for a tracked instruction.
/// Delegates to the ZigObject; asserts one exists.
pub fn updateLineNumber(self: *MachO, pt: Zcu.PerThread, ti_id: InternPool.TrackedInst.Index) !void {
    return self.getZigObject().?.updateLineNumber(pt, ti_id);
}
3070
/// Records or updates the exported symbols for a declaration.
/// Delegates to the ZigObject; asserts one exists.
pub fn updateExports(
    self: *MachO,
    pt: Zcu.PerThread,
    exported: Zcu.Exported,
    export_indices: []const Zcu.Export.Index,
) link.File.UpdateExportsError!void {
    if (build_options.skip_non_native and builtin.object_format != .macho) {
        @panic("Attempted to compile for object format that was disabled by build configuration");
    }
    return self.getZigObject().?.updateExports(self, pt, exported, export_indices);
}
3082
/// Removes a previously recorded export by name.
/// Delegates to the ZigObject; asserts one exists.
pub fn deleteExport(
    self: *MachO,
    exported: Zcu.Exported,
    name: InternPool.NullTerminatedString,
) void {
    return self.getZigObject().?.deleteExport(self, exported, name);
}
3090
/// Frees linker resources associated with a declaration.
/// Delegates to the ZigObject; asserts one exists.
pub fn freeNav(self: *MachO, nav: InternPool.Nav.Index) void {
    return self.getZigObject().?.freeNav(nav);
}
3094
/// Returns the virtual address of a declaration, registering the relocation
/// described by `reloc_info`. Delegates to the ZigObject.
pub fn getNavVAddr(self: *MachO, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index, reloc_info: link.File.RelocInfo) !u64 {
    return self.getZigObject().?.getNavVAddr(self, pt, nav_index, reloc_info);
}
3098
/// Lowers an unnamed (anonymous) value into the output.
/// Delegates to the ZigObject; asserts one exists.
pub fn lowerUav(
    self: *MachO,
    pt: Zcu.PerThread,
    uav: InternPool.Index,
    explicit_alignment: InternPool.Alignment,
    src_loc: Zcu.LazySrcLoc,
) !codegen.SymbolResult {
    return self.getZigObject().?.lowerUav(self, pt, uav, explicit_alignment, src_loc);
}
3108
/// Returns the virtual address of an unnamed value, registering the
/// relocation described by `reloc_info`. Delegates to the ZigObject.
pub fn getUavVAddr(self: *MachO, uav: InternPool.Index, reloc_info: link.File.RelocInfo) !u64 {
    return self.getZigObject().?.getUavVAddr(self, uav, reloc_info);
}
3112
/// Resolves (or creates) a global symbol by name, optionally associated with
/// a library. Delegates to the ZigObject; asserts one exists.
pub fn getGlobalSymbol(self: *MachO, name: []const u8, lib_name: ?[]const u8) !u32 {
    return self.getZigObject().?.getGlobalSymbol(self, name, lib_name);
}
3116
/// Returns `actual_size` grown by `1/ideal_factor` of itself as headroom,
/// saturating at the maximum of the integer type.
pub fn padToIdeal(actual_size: anytype) @TypeOf(actual_size) {
    const headroom = actual_size / ideal_factor;
    return actual_size +| headroom;
}
3120
/// Checks whether the file range [start, start+size) (plus ideal padding)
/// collides with any existing section or segment file range.
/// Returns the end offset of the colliding entity (a candidate next start),
/// or null if the range is free. If the range sits past everything else,
/// the file is extended to cover it.
fn detectAllocCollision(self: *MachO, start: u64, size: u64) !?u64 {
    // Conservatively commit one page size as reserved space for the headers as we
    // expect it to grow and everything else be moved in flush anyhow.
    const header_size = self.getPageSize();
    if (start < header_size)
        return header_size;

    var at_end = true;
    const end = start + padToIdeal(size);

    for (self.sections.items(.header)) |header| {
        // Zerofill sections occupy no file space.
        if (header.isZerofill()) continue;
        const increased_size = padToIdeal(header.size);
        const test_end = header.offset +| increased_size;
        if (start < test_end) {
            // Overlap: [start, end) intersects [header.offset, test_end).
            if (end > header.offset) return test_end;
            // Something lives beyond our candidate range, so we are not at
            // the end of the file.
            if (test_end < std.math.maxInt(u64)) at_end = false;
        }
    }

    for (self.segments.items) |seg| {
        const increased_size = padToIdeal(seg.filesize);
        const test_end = seg.fileoff +| increased_size;
        if (start < test_end) {
            if (end > seg.fileoff) return test_end;
            if (test_end < std.math.maxInt(u64)) at_end = false;
        }
    }

    // Nothing follows us: grow the file so the range is backed by real bytes.
    if (at_end) try self.base.file.?.setEndPos(end);
    return null;
}
3153
/// Virtual-memory analogue of `detectAllocCollision`: checks [start,
/// start+size) (plus ideal padding) against existing section/segment vm
/// ranges. Returns the end address of the colliding entity, or null if free.
/// Unlike the file variant, no resizing is needed, so this cannot fail.
fn detectAllocCollisionVirtual(self: *MachO, start: u64, size: u64) ?u64 {
    // Conservatively commit one page size as reserved space for the headers as we
    // expect it to grow and everything else be moved in flush anyhow.
    const header_size = self.getPageSize();
    if (start < header_size)
        return header_size;

    const end = start + padToIdeal(size);

    for (self.sections.items(.header)) |header| {
        const increased_size = padToIdeal(header.size);
        const test_end = header.addr +| increased_size;
        if (end > header.addr and start < test_end) {
            return test_end;
        }
    }

    for (self.segments.items) |seg| {
        const increased_size = padToIdeal(seg.vmsize);
        const test_end = seg.vmaddr +| increased_size;
        if (end > seg.vmaddr and start < test_end) {
            return test_end;
        }
    }

    return null;
}
3181
/// Returns how many file bytes are available starting at `start` before the
/// next section or segment begins (maxInt-bounded when nothing follows).
/// A `start` of 0 is treated as unallocated and yields 0.
pub fn allocatedSize(self: *MachO, start: u64) u64 {
    if (start == 0) return 0;

    var min_pos: u64 = std.math.maxInt(u64);

    for (self.sections.items(.header)) |header| {
        if (header.offset > start) min_pos = @min(min_pos, header.offset);
    }

    for (self.segments.items) |seg| {
        if (seg.fileoff > start) min_pos = @min(min_pos, seg.fileoff);
    }

    return min_pos - start;
}
3199
/// Virtual-memory analogue of `allocatedSize`: bytes available at address
/// `start` before the next section or segment vm range begins.
/// A `start` of 0 is treated as unallocated and yields 0.
pub fn allocatedSizeVirtual(self: *MachO, start: u64) u64 {
    if (start == 0) return 0;

    var min_pos: u64 = std.math.maxInt(u64);

    for (self.sections.items(.header)) |header| {
        if (header.addr > start) min_pos = @min(min_pos, header.addr);
    }

    for (self.segments.items) |seg| {
        if (seg.vmaddr > start) min_pos = @min(min_pos, seg.vmaddr);
    }

    return min_pos - start;
}
3217
/// Finds a free, aligned file offset for `object_size` bytes by walking past
/// collisions until `detectAllocCollision` reports the range as free.
pub fn findFreeSpace(self: *MachO, object_size: u64, min_alignment: u32) !u64 {
    var start: u64 = 0;
    while (try self.detectAllocCollision(start, object_size)) |item_end| {
        start = mem.alignForward(u64, item_end, min_alignment);
    }
    return start;
}
3225
/// Finds a free, aligned virtual address for `object_size` bytes by walking
/// past collisions reported by `detectAllocCollisionVirtual`.
pub fn findFreeSpaceVirtual(self: *MachO, object_size: u64, min_alignment: u32) u64 {
    var start: u64 = 0;
    while (self.detectAllocCollisionVirtual(start, object_size)) |item_end| {
        start = mem.alignForward(u64, item_end, min_alignment);
    }
    return start;
}
3233
/// Copies `size` bytes within the output file from `old_offset` to
/// `new_offset`. Returns error.InputOutput on a short copy.
pub fn copyRangeAll(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
    const file = self.base.file.?;
    const amt = try file.copyRangeAll(old_offset, file, new_offset, size);
    if (amt != size) return error.InputOutput;
}
3239
/// Like File.copyRangeAll but also ensures the source region is zeroed out after copy.
/// This is so that we guarantee zeroed out regions for mapping of zerofill sections by the loader.
fn copyRangeAllZeroOut(self: *MachO, old_offset: u64, new_offset: u64, size: u64) !void {
    try self.copyRangeAll(old_offset, new_offset, size);
    // Zero the source region in fixed-size chunks written from a stack
    // buffer, avoiding a heap allocation proportional to `size`.
    const zeroes = [_]u8{0} ** 4096;
    var offset = old_offset;
    var remaining = size;
    while (remaining > 0) {
        const chunk: usize = @intCast(@min(remaining, zeroes.len));
        try self.base.file.?.pwriteAll(zeroes[0..chunk], offset);
        offset += chunk;
        remaining -= chunk;
    }
}
3251
/// Options passed to `initMetadata`.
const InitMetadataOptions = struct {
    /// Output path of the binary being emitted (used for the dSYM bundle name).
    emit: Path,
    /// The ZigObject backing incremental compilation.
    zo: *ZigObject,
    // NOTE(review): not referenced in this part of the file — presumably a
    // pre-sizing hint for symbol tables; confirm against callers.
    symbol_count_hint: u64,
    /// Initial file size reserved for the Zig text segment/section.
    program_code_size_hint: u64,
};
3258
/// Closes the dSYM file handle if debug symbols are present.
/// Returns true if a handle was closed, false if there is no dSYM bundle.
pub fn closeDebugInfo(self: *MachO) bool {
    // Capture a pointer into the stored optional so the `file = null` write
    // mutates `self.d_sym` itself rather than a temporary copy of it.
    const d_sym = if (self.d_sym) |*ds| ds else return false;
    d_sym.file.?.close();
    d_sym.file = null;
    return true;
}
3265
/// (Re)opens the DWARF file inside the .dSYM bundle next to the emitted
/// binary, creating the bundle directory structure if needed.
/// Asserts the dSYM exists but currently has no open file handle.
pub fn reopenDebugInfo(self: *MachO) !void {
    assert(self.d_sym.?.file == null);

    // dSYM emission is only done by the self-hosted backends with DWARF.
    assert(!self.base.comp.config.use_llvm);
    assert(self.base.comp.config.debug_format == .dwarf);

    const gpa = self.base.comp.gpa;
    const sep = fs.path.sep_str;
    // Standard dSYM bundle layout: <name>.dSYM/Contents/Resources/DWARF/<name>
    const d_sym_path = try std.fmt.allocPrint(
        gpa,
        "{s}.dSYM" ++ sep ++ "Contents" ++ sep ++ "Resources" ++ sep ++ "DWARF",
        .{self.base.emit.sub_path},
    );
    defer gpa.free(d_sym_path);

    var d_sym_bundle = try self.base.emit.root_dir.handle.makeOpenPath(d_sym_path, .{});
    defer d_sym_bundle.close();

    self.d_sym.?.file = try d_sym_bundle.createFile(fs.path.basename(self.base.emit.sub_path), .{
        .truncate = false,
        .read = true,
    });
}
3289
// TODO: move to ZigObject
/// Sets up the initial Zig-owned segments and sections used by incremental
/// linking, plus (when applicable) the dSYM bundle / DWARF debug sections.
/// For non-relocatable output, dedicated __*_ZIG segments are created at
/// fixed virtual offsets; for relocatable output only sections are created
/// and placed via the free-space allocators.
fn initMetadata(self: *MachO, options: InitMetadataOptions) !void {
    if (!self.base.isRelocatable()) {
        const base_vmaddr = blk: {
            const pagezero_size = self.pagezero_size orelse default_pagezero_size;
            break :blk mem.alignBackward(u64, pagezero_size, self.getPageSize());
        };

        // Each Zig segment is placed at a fixed, widely-spaced offset from
        // the base vmaddr so it has room to grow independently.
        {
            const filesize = options.program_code_size_hint;
            const off = try self.findFreeSpace(filesize, self.getPageSize());
            self.zig_text_seg_index = try self.addSegment("__TEXT_ZIG", .{
                .fileoff = off,
                .filesize = filesize,
                .vmaddr = base_vmaddr + 0x4000000,
                .vmsize = filesize,
                .prot = macho.PROT.READ | macho.PROT.EXEC,
            });
        }

        {
            const filesize: u64 = 1024;
            const off = try self.findFreeSpace(filesize, self.getPageSize());
            self.zig_const_seg_index = try self.addSegment("__CONST_ZIG", .{
                .fileoff = off,
                .filesize = filesize,
                .vmaddr = base_vmaddr + 0xc000000,
                .vmsize = filesize,
                .prot = macho.PROT.READ | macho.PROT.WRITE,
            });
        }

        {
            const filesize: u64 = 1024;
            const off = try self.findFreeSpace(filesize, self.getPageSize());
            self.zig_data_seg_index = try self.addSegment("__DATA_ZIG", .{
                .fileoff = off,
                .filesize = filesize,
                .vmaddr = base_vmaddr + 0x10000000,
                .vmsize = filesize,
                .prot = macho.PROT.READ | macho.PROT.WRITE,
            });
        }

        {
            // BSS occupies no file space; only a vm range is reserved.
            const memsize: u64 = 1024;
            self.zig_bss_seg_index = try self.addSegment("__BSS_ZIG", .{
                .vmaddr = base_vmaddr + 0x14000000,
                .vmsize = memsize,
                .prot = macho.PROT.READ | macho.PROT.WRITE,
            });
        }

        if (options.zo.dwarf) |*dwarf| {
            // Create dSYM bundle.
            log.debug("creating {s}.dSYM bundle", .{options.emit.sub_path});
            self.d_sym = .{ .allocator = self.base.comp.gpa, .file = null };
            try self.reopenDebugInfo();
            try self.d_sym.?.initMetadata(self);
            try dwarf.initMetadata();
        }
    }

    // Binds a section to an existing segment, mirroring the segment's
    // address/offset/size into the section header.
    const appendSect = struct {
        fn appendSect(macho_file: *MachO, sect_id: u8, seg_id: u8) void {
            const sect = &macho_file.sections.items(.header)[sect_id];
            const seg = macho_file.segments.items[seg_id];
            sect.addr = seg.vmaddr;
            sect.offset = @intCast(seg.fileoff);
            sect.size = seg.vmsize;
            macho_file.sections.items(.segment_id)[sect_id] = seg_id;
        }
    }.appendSect;

    // Allocates file space (unless zerofill) and a vm range for a section,
    // used in the relocatable case where there are no Zig segments.
    const allocSect = struct {
        fn allocSect(macho_file: *MachO, sect_id: u8, size: u64) !void {
            const sect = &macho_file.sections.items(.header)[sect_id];
            const alignment = try macho_file.alignPow(sect.@"align");
            if (!sect.isZerofill()) {
                sect.offset = try macho_file.cast(u32, try macho_file.findFreeSpace(size, alignment));
            }
            sect.addr = macho_file.findFreeSpaceVirtual(size, alignment);
            sect.size = size;
        }
    }.allocSect;

    {
        self.zig_text_sect_index = try self.addSection("__TEXT_ZIG", "__text_zig", .{
            // Code alignment: 4 bytes (2^2) on arm64, 1 byte on x86_64.
            .alignment = switch (self.getTarget().cpu.arch) {
                .aarch64 => 2,
                .x86_64 => 0,
                else => unreachable,
            },
            .flags = macho.S_REGULAR | macho.S_ATTR_PURE_INSTRUCTIONS | macho.S_ATTR_SOME_INSTRUCTIONS,
        });
        if (self.base.isRelocatable()) {
            try allocSect(self, self.zig_text_sect_index.?, options.program_code_size_hint);
        } else {
            appendSect(self, self.zig_text_sect_index.?, self.zig_text_seg_index.?);
        }
    }

    {
        self.zig_const_sect_index = try self.addSection("__CONST_ZIG", "__const_zig", .{});
        if (self.base.isRelocatable()) {
            try allocSect(self, self.zig_const_sect_index.?, 1024);
        } else {
            appendSect(self, self.zig_const_sect_index.?, self.zig_const_seg_index.?);
        }
    }

    {
        self.zig_data_sect_index = try self.addSection("__DATA_ZIG", "__data_zig", .{});
        if (self.base.isRelocatable()) {
            try allocSect(self, self.zig_data_sect_index.?, 1024);
        } else {
            appendSect(self, self.zig_data_sect_index.?, self.zig_data_seg_index.?);
        }
    }

    {
        self.zig_bss_sect_index = try self.addSection("__BSS_ZIG", "__bss_zig", .{
            .flags = macho.S_ZEROFILL,
        });
        if (self.base.isRelocatable()) {
            try allocSect(self, self.zig_bss_sect_index.?, 1024);
        } else {
            appendSect(self, self.zig_bss_sect_index.?, self.zig_bss_seg_index.?);
        }
    }

    // In relocatable mode DWARF lives in __DWARF sections of the object file
    // itself rather than in a separate dSYM bundle.
    if (self.base.isRelocatable()) if (options.zo.dwarf) |*dwarf| {
        self.debug_str_sect_index = try self.addSection("__DWARF", "__debug_str", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_info_sect_index = try self.addSection("__DWARF", "__debug_info", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_abbrev_sect_index = try self.addSection("__DWARF", "__debug_abbrev", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_aranges_sect_index = try self.addSection("__DWARF", "__debug_aranges", .{
            .alignment = 4,
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_line_sect_index = try self.addSection("__DWARF", "__debug_line", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_line_str_sect_index = try self.addSection("__DWARF", "__debug_line_str", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_loclists_sect_index = try self.addSection("__DWARF", "__debug_loclists", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        self.debug_rnglists_sect_index = try self.addSection("__DWARF", "__debug_rnglists", .{
            .flags = macho.S_ATTR_DEBUG,
        });
        try dwarf.initMetadata();
    };
}
3450
/// Grows a section to `needed_size`, dispatching on the output mode since
/// relocatable objects manage their own virtual addresses per section.
pub fn growSection(self: *MachO, sect_index: u8, needed_size: u64) !void {
    return if (self.base.isRelocatable())
        self.growSectionRelocatable(sect_index, needed_size)
    else
        self.growSectionNonRelocatable(sect_index, needed_size);
}
3458
/// Grows a section (and its owning segment) in non-relocatable output.
/// If the section's current file allocation is too small, the whole section
/// is moved to a fresh file range and the old range is zeroed out.
/// Reports a link diagnostic if the segment cannot grow in virtual memory.
fn growSectionNonRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
    const diags = &self.base.comp.link_diags;
    const sect = &self.sections.items(.header)[sect_index];

    const seg_id = self.sections.items(.segment_id)[sect_index];
    const seg = &self.segments.items[seg_id];

    if (!sect.isZerofill()) {
        const allocated_size = self.allocatedSize(sect.offset);
        if (needed_size > allocated_size) {
            const existing_size = sect.size;
            // Zero the size first so the free-space search does not count
            // this section's own allocation as occupied.
            sect.size = 0;

            // Must move the entire section.
            const alignment = self.getPageSize();
            const new_offset = try self.findFreeSpace(needed_size, alignment);

            log.debug("moving '{s},{s}' from 0x{x} to 0x{x}", .{
                sect.segName(),
                sect.sectName(),
                sect.offset,
                new_offset,
            });

            try self.copyRangeAllZeroOut(sect.offset, new_offset, existing_size);

            sect.offset = @intCast(new_offset);
        } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
            // Section is the last allocation in the file: just extend the file.
            try self.base.file.?.setEndPos(sect.offset + needed_size);
        }
        seg.filesize = needed_size;
    }
    sect.size = needed_size;
    seg.fileoff = sect.offset;

    const mem_capacity = self.allocatedSizeVirtual(seg.vmaddr);
    if (needed_size > mem_capacity) {
        var err = try diags.addErrorWithNotes(2);
        try err.addMsg("fatal linker error: cannot expand segment seg({d})({s}) in virtual memory", .{
            seg_id,
            seg.segName(),
        });
        err.addNote("TODO: emit relocations to memory locations in self-hosted backends", .{});
        err.addNote("as a workaround, try increasing pre-allocated virtual memory of each segment", .{});
    }

    seg.vmsize = needed_size;
}
3507
/// Grows a section in relocatable output. If the current file allocation is
/// too small, the section is moved to fresh file space AND a fresh virtual
/// address range (relocatable sections own their vm placement directly).
fn growSectionRelocatable(self: *MachO, sect_index: u8, needed_size: u64) !void {
    const sect = &self.sections.items(.header)[sect_index];

    if (!sect.isZerofill()) {
        const allocated_size = self.allocatedSize(sect.offset);
        if (needed_size > allocated_size) {
            const existing_size = sect.size;
            // Zero the size first so the free-space search does not count
            // this section's own allocation as occupied.
            sect.size = 0;

            // Must move the entire section.
            const alignment = try math.powi(u32, 2, sect.@"align");
            const new_offset = try self.findFreeSpace(needed_size, alignment);
            const new_addr = self.findFreeSpaceVirtual(needed_size, alignment);

            log.debug("new '{s},{s}' file offset 0x{x} to 0x{x} (0x{x} - 0x{x})", .{
                sect.segName(),
                sect.sectName(),
                new_offset,
                new_offset + existing_size,
                new_addr,
                new_addr + existing_size,
            });

            try self.copyRangeAll(sect.offset, new_offset, existing_size);

            sect.offset = @intCast(new_offset);
            sect.addr = new_addr;
        } else if (sect.offset + allocated_size == std.math.maxInt(u64)) {
            // Section is the last allocation in the file: just extend the file.
            try self.base.file.?.setEndPos(sect.offset + needed_size);
        }
    }
    sect.size = needed_size;
}
3541
/// Marks the corresponding debug-info state on the ZigObject dirty when one
/// of the DWARF sections changes. No-op when there is no ZigObject.
pub fn markDirty(self: *MachO, sect_index: u8) void {
    if (self.getZigObject()) |zo| {
        // NOTE(review): the `.?` unwraps assume every debug section index is
        // populated whenever a ZigObject exists — confirm against initMetadata,
        // which only creates them for relocatable output with DWARF enabled.
        if (self.debug_info_sect_index.? == sect_index) {
            zo.debug_info_header_dirty = true;
        } else if (self.debug_line_sect_index.? == sect_index) {
            zo.debug_line_header_dirty = true;
        } else if (self.debug_abbrev_sect_index.? == sect_index) {
            zo.debug_abbrev_dirty = true;
        } else if (self.debug_str_sect_index.? == sect_index) {
            zo.debug_strtab_dirty = true;
        } else if (self.debug_aranges_sect_index.? == sect_index) {
            zo.debug_aranges_dirty = true;
        }
    }
}
3557
/// Returns the resolved compilation target of the root module.
pub fn getTarget(self: *const MachO) *const std.Target {
    return &self.base.comp.root_mod.resolved_target.result;
}
3561
3562/// XNU starting with Big Sur running on arm64 is caching inodes of running binaries.
3563/// Any change to the binary will effectively invalidate the kernel's cache
3564/// resulting in a SIGKILL on each subsequent run. Since when doing incremental
3565/// linking we're modifying a binary in-place, this will end up with the kernel
3566/// killing it on every subsequent run. To circumvent it, we will copy the file
3567/// into a new inode, remove the original file, and rename the copy to match
3568/// the original file. This is super messy, but there doesn't seem any other
3569/// way to please the XNU.
pub fn invalidateKernelCache(dir: fs.Dir, sub_path: []const u8) !void {
    const tracy = trace(@src());
    defer tracy.end();
    // Only relevant when the *host* is Apple Silicon; copying the file onto
    // itself gives it a fresh inode, defeating the kernel's cache.
    if (builtin.target.os.tag.isDarwin() and builtin.target.cpu.arch == .aarch64) {
        try dir.copyFile(sub_path, dir, sub_path, .{});
    }
}
3577
inline fn conformUuid(out: *[Md5.digest_length]u8) void {
    // Stamp RFC 4122 version and variant bits onto the raw MD5 digest.
    // An MD5 name-based UUID is version 3 — hence `3 << 4` in the high nibble
    // of byte 6 — and byte 8 carries the RFC 4122 variant (0b10xxxxxx).
    out[6] = (out[6] & 0x0F) | (3 << 4);
    out[8] = (out[8] & 0x3F) | 0x80;
}
3583
/// Returns the target's page size: 16 KiB on arm64, 4 KiB on x86_64.
pub inline fn getPageSize(self: MachO) u16 {
    return switch (self.getTarget().cpu.arch) {
        .aarch64 => 0x4000,
        .x86_64 => 0x1000,
        else => unreachable,
    };
}
3591
/// Returns whether the output binary must carry a code signature:
/// always when entitlements are present, otherwise only on arm64 for
/// macOS-family targets and device-OS simulators.
pub fn requiresCodeSig(self: MachO) bool {
    if (self.entitlements) |_| return true;
    // TODO: enable once we support this linker option
    // if (self.options.adhoc_codesign) |cs| return cs;
    const target = self.getTarget();
    return switch (target.cpu.arch) {
        .aarch64 => switch (target.os.tag) {
            .driverkit, .maccatalyst, .macos => true,
            .ios, .tvos, .visionos, .watchos => target.abi == .simulator,
            else => false,
        },
        .x86_64 => false,
        else => unreachable,
    };
}
3607
/// Range-extension thunks are only needed on arm64 in this linker.
inline fn requiresThunks(self: MachO) bool {
    return self.getTarget().cpu.arch == .aarch64;
}
3611
/// Returns whether `seg_id` refers to one of the Zig-owned segments.
pub fn isZigSegment(self: MachO, seg_id: u8) bool {
    inline for (&[_]?u8{
        self.zig_text_seg_index,
        self.zig_const_seg_index,
        self.zig_data_seg_index,
        self.zig_bss_seg_index,
    }) |maybe_index| {
        const index = maybe_index orelse continue;
        if (index == seg_id) return true;
    }
    return false;
}
3625
/// Returns whether `sect_id` refers to one of the Zig-owned sections.
pub fn isZigSection(self: MachO, sect_id: u8) bool {
    inline for (&[_]?u8{
        self.zig_text_sect_index,
        self.zig_const_sect_index,
        self.zig_data_sect_index,
        self.zig_bss_sect_index,
    }) |maybe_index| {
        const index = maybe_index orelse continue;
        if (index == sect_id) return true;
    }
    return false;
}
3639
/// Returns whether `sect_id` refers to one of the DWARF debug sections.
pub fn isDebugSection(self: MachO, sect_id: u8) bool {
    inline for (&[_]?u8{
        self.debug_info_sect_index,
        self.debug_abbrev_sect_index,
        self.debug_str_sect_index,
        self.debug_aranges_sect_index,
        self.debug_line_sect_index,
    }) |maybe_index| {
        const index = maybe_index orelse continue;
        if (index == sect_id) return true;
    }
    return false;
}
3654
/// Appends a new segment load command with the given name, placement and
/// protection. Returns the index of the newly added segment.
pub fn addSegment(self: *MachO, name: []const u8, opts: struct {
    vmaddr: u64 = 0,
    vmsize: u64 = 0,
    fileoff: u64 = 0,
    filesize: u64 = 0,
    prot: macho.vm_prot_t = macho.PROT.NONE,
}) error{OutOfMemory}!u8 {
    const gpa = self.base.comp.gpa;
    const seg_index: u8 = @intCast(self.segments.items.len);
    try self.segments.append(gpa, .{
        .segname = makeStaticString(name),
        .cmdsize = @sizeOf(macho.segment_command_64),
        .vmaddr = opts.vmaddr,
        .vmsize = opts.vmsize,
        .fileoff = opts.fileoff,
        .filesize = opts.filesize,
        // Segments start with max and initial protections identical.
        .maxprot = opts.prot,
        .initprot = opts.prot,
        .nsects = 0,
    });
    return seg_index;
}
3677
/// Options for `addSection`.
const AddSectionOpts = struct {
    /// log2 of the section alignment, as stored in the section header.
    alignment: u32 = 0,
    /// Section type and attribute flags (macho.S_*).
    flags: u32 = macho.S_REGULAR,
    reserved1: u32 = 0,
    reserved2: u32 = 0,
};
3684
/// Appends a new section header with the given segment/section names and
/// options. Returns the index of the newly added section.
pub fn addSection(
    self: *MachO,
    segname: []const u8,
    sectname: []const u8,
    opts: AddSectionOpts,
) !u8 {
    const gpa = self.base.comp.gpa;
    const sect_index: u8 = @intCast(try self.sections.addOne(gpa));
    self.sections.set(sect_index, .{
        // Segments will be created automatically later down the pipeline.
        .segment_id = 0,
        .header = .{
            .segname = makeStaticString(segname),
            .sectname = makeStaticString(sectname),
            .flags = opts.flags,
            .@"align" = opts.alignment,
            .reserved1 = opts.reserved1,
            .reserved2 = opts.reserved2,
        },
    });
    return sect_index;
}
3706
/// Copies `bytes` into a fixed 16-byte zero-padded buffer, the format used by
/// Mach-O segment and section name fields. Asserts that `bytes` fits; in
/// release modes an over-long name would otherwise be out-of-bounds UB.
pub fn makeStaticString(bytes: []const u8) [16]u8 {
    var buf = [_]u8{0} ** 16;
    std.debug.assert(bytes.len <= buf.len);
    @memcpy(buf[0..bytes.len], bytes);
    return buf;
}
3712
/// Finds the index of the segment named `segname`, or null if absent.
pub fn getSegmentByName(self: MachO, segname: []const u8) ?u8 {
    for (self.segments.items, 0..) |seg, index| {
        if (mem.eql(u8, seg.segName(), segname)) return @intCast(index);
    }
    return null;
}
3718
/// Finds the index of section `segname,sectname`, or null if absent.
pub fn getSectionByName(self: MachO, segname: []const u8, sectname: []const u8) ?u8 {
    for (self.sections.items(.header), 0..) |header, index| {
        const seg_matches = mem.eql(u8, header.segName(), segname);
        if (seg_matches and mem.eql(u8, header.sectName(), sectname)) {
            return @intCast(index);
        }
    }
    return null;
}
3725
/// Returns the virtual address of the first thread-local section
/// (regular or zerofill), or 0 when the output has no TLS data.
pub fn getTlsAddress(self: MachO) u64 {
    for (self.sections.items(.header)) |header| {
        switch (header.type()) {
            macho.S_THREAD_LOCAL_REGULAR,
            macho.S_THREAD_LOCAL_ZEROFILL,
            => return header.addr,
            else => continue,
        }
    }
    return 0;
}
3735
/// Returns a pointer to the text segment command.
/// Asserts (via `.?`) that the segment has already been created.
pub inline fn getTextSegment(self: *MachO) *macho.segment_command_64 {
    return &self.segments.items[self.text_seg_index.?];
}
3739
/// Returns a pointer to the linkedit segment command.
/// Asserts (via `.?`) that the segment has already been created.
pub inline fn getLinkeditSegment(self: *MachO) *macho.segment_command_64 {
    return &self.segments.items[self.linkedit_seg_index.?];
}
3743
/// Resolves `index` into a typed `File` wrapper pointing at the stored
/// input-file data, or null for the reserved `.null` slot.
pub fn getFile(self: *MachO, index: File.Index) ?File {
    const data = self.files.items(.data);
    switch (self.files.items(.tags)[index]) {
        .null => return null,
        .zig_object => return .{ .zig_object = &data[index].zig_object },
        .internal => return .{ .internal = &data[index].internal },
        .object => return .{ .object = &data[index].object },
        .dylib => return .{ .dylib = &data[index].dylib },
    }
}
3754
/// Returns the in-memory ZigObject input, or null if this link has none.
pub fn getZigObject(self: *MachO) ?*ZigObject {
    if (self.zig_object) |index| {
        return self.getFile(index).?.zig_object;
    }
    return null;
}
3759
/// Returns the synthetic internal object, or null if none was created.
pub fn getInternalObject(self: *MachO) ?*InternalObject {
    if (self.internal_object) |index| {
        return self.getFile(index).?.internal;
    }
    return null;
}
3764
/// Registers `file` in the global handle list and returns its index.
/// Handles live for the duration of the link so one descriptor can be
/// shared by all objects inside an archive.
pub fn addFileHandle(self: *MachO, file: fs.File) !File.HandleIndex {
    const gpa = self.base.comp.gpa;
    const handle_index: File.HandleIndex = @intCast(self.file_handles.items.len);
    try self.file_handles.append(gpa, file);
    return handle_index;
}
3772
/// Returns the handle previously registered via `addFileHandle`.
/// Asserts that `index` is in bounds.
pub fn getFileHandle(self: MachO, index: File.HandleIndex) File.Handle {
    assert(index < self.file_handles.items.len);
    return self.file_handles.items[index];
}
3777
/// Appends a fresh, default-initialized thunk and returns its index.
pub fn addThunk(self: *MachO) !Thunk.Index {
    const gpa = self.base.comp.gpa;
    const thunk_index: Thunk.Index = @intCast(self.thunks.items.len);
    try self.thunks.append(gpa, .{});
    return thunk_index;
}
3784
/// Returns a pointer to the thunk at `index`. Asserts the index is in bounds.
pub fn getThunk(self: *MachO, index: Thunk.Index) *Thunk {
    assert(index < self.thunks.items.len);
    return &self.thunks.items[index];
}
3789
/// If `path` begins with `prefix`, returns the remainder after the prefix;
/// otherwise returns null. An empty prefix always matches.
pub fn eatPrefix(path: []const u8, prefix: []const u8) ?[]const u8 {
    if (!mem.startsWith(u8, path, prefix)) return null;
    return path[prefix.len..];
}
3794
/// Records a parse error in the link diagnostics, with one note attributing
/// it to the input file `file_index`. Only fails on OOM.
pub fn reportParseError2(
    self: *MachO,
    file_index: File.Index,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    const diags = &self.base.comp.link_diags;
    // Exactly one note slot is reserved for the "while parsing" attribution.
    var err = try diags.addErrorWithNotes(1);
    try err.addMsg(format, args);
    err.addNote("while parsing {f}", .{self.getFile(file_index).?.fmtPath()});
}
3806
/// Records an error for a dependency `path` of `parent` that could not be
/// located, listing every path that was probed. Only fails on OOM.
fn reportMissingDependencyError(
    self: *MachO,
    parent: File.Index,
    path: []const u8,
    checked_paths: []const []const u8,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    const diags = &self.base.comp.link_diags;
    // Two fixed notes ("while resolving", "a dependency of") plus one per probed path.
    var err = try diags.addErrorWithNotes(2 + checked_paths.len);
    try err.addMsg(format, args);
    err.addNote("while resolving {s}", .{path});
    err.addNote("a dependency of {f}", .{self.getFile(parent).?.fmtPath()});
    for (checked_paths) |p| {
        err.addNote("tried {s}", .{p});
    }
}
3824
/// Records an error encountered while parsing dependency `path` of `parent`.
/// Only fails on OOM.
fn reportDependencyError(
    self: *MachO,
    parent: File.Index,
    path: []const u8,
    comptime format: []const u8,
    args: anytype,
) error{OutOfMemory}!void {
    const diags = &self.base.comp.link_diags;
    // Two note slots: "while parsing" and "a dependency of".
    var err = try diags.addErrorWithNotes(2);
    try err.addMsg(format, args);
    err.addNote("while parsing {s}", .{path});
    err.addNote("a dependency of {f}", .{self.getFile(parent).?.fmtPath()});
}
3838
/// Emits one diagnostic per duplicated global symbol, then fails the link
/// with error.HasDuplicates. At most `max_notes` defining files are named
/// per symbol; the rest are summarized in a final "N more times" note.
fn reportDuplicates(self: *MachO) error{ HasDuplicates, OutOfMemory }!void {
    const tracy = trace(@src());
    defer tracy.end();

    if (self.dupes.keys().len == 0) return; // Nothing to do

    const gpa = self.base.comp.gpa;
    const diags = &self.base.comp.link_diags;
    const max_notes = 3;

    // Sort symbols by name, and each symbol's definers by file index,
    // so the diagnostic output is deterministic.
    var sorted_keys = try std.array_list.Managed(SymbolResolver.Index).initCapacity(gpa, self.dupes.keys().len);
    defer sorted_keys.deinit();
    sorted_keys.appendSliceAssumeCapacity(self.dupes.keys());
    self.sortGlobalSymbolsByName(sorted_keys.items);

    for (self.dupes.values()) |*file_list| {
        mem.sort(File.Index, file_list.items, {}, std.sort.asc(File.Index));
    }

    for (sorted_keys.items) |key| {
        const sym = self.resolver.keys.items[key - 1];
        const files = self.dupes.get(key).?;
        const shown = @min(files.items.len, max_notes);
        const truncated = files.items.len > max_notes;

        // One note per shown definer, plus the original definition,
        // plus an optional truncation summary.
        var err = try diags.addErrorWithNotes(shown + @intFromBool(truncated) + 1);
        try err.addMsg("duplicate symbol definition: {s}", .{sym.getName(self)});
        err.addNote("defined by {f}", .{sym.getFile(self).?.fmtPath()});

        for (files.items[0..shown]) |file_index| {
            err.addNote("defined by {f}", .{self.getFile(file_index).?.fmtPath()});
        }
        if (truncated) {
            err.addNote("defined {d} more times", .{files.items.len - max_notes});
        }
    }
    return error.HasDuplicates;
}
3881
/// Returns a pointer to the debug symbols (dSYM) bundle if one is attached.
/// Pointer capture is used so callers mutate the stored bundle, not a copy.
pub fn getDebugSymbols(self: *MachO) ?*DebugSymbols {
    return if (self.d_sym) |*ds| ds else null;
}
3886
/// Attaches to the running process `pid` for hot code updates by acquiring
/// its Mach task and stashing it in `hot_state`. No-op on hosts where hot
/// updates are not supported (see `is_hot_update_compatible`).
pub fn ptraceAttach(self: *MachO, pid: std.posix.pid_t) !void {
    if (!is_hot_update_compatible) return;

    const mach_task = try machTaskForPid(pid);
    log.debug("Mach task for pid {d}: {any}", .{ pid, mach_task });
    self.hot_state.mach_task = mach_task;

    // TODO start exception handler in another thread

    // TODO enable ones we register for exceptions
    // try std.os.ptrace(std.os.darwin.PT.ATTACHEXC, pid, 0, 0);
}
3899
/// Detaches from the hot-update target by clearing the stored Mach task.
/// No-op on unsupported hosts; `pid` is currently unused (see TODOs).
pub fn ptraceDetach(self: *MachO, pid: std.posix.pid_t) !void {
    if (!is_hot_update_compatible) return;

    _ = pid;

    // TODO stop exception handler

    // TODO see comment in ptraceAttach
    // try std.os.ptrace(std.os.darwin.PT.DETACH, pid, 0, 0);

    self.hot_state.mach_task = null;
}
3912
/// Returns a formatting adapter that renders the full linker state
/// (see `fmtDumpState`) when printed with `{f}`.
pub fn dumpState(self: *MachO) std.fmt.Alt(*MachO, fmtDumpState) {
    return .{ .data = self };
}
3916
/// Writes a multi-line, human-readable dump of the whole linker state:
/// the Zig object, every object and dylib input (with their atoms/symbols),
/// the internal object, thunks, synthetic tables (stubs, GOT, TLV pointers),
/// and finally the section and segment tables.
fn fmtDumpState(self: *MachO, w: *Writer) Writer.Error!void {
    if (self.getZigObject()) |zo| {
        try w.print("zig_object({d}) : {s}\n", .{ zo.index, zo.basename });
        try w.print("{f}{f}\n", .{
            zo.fmtAtoms(self),
            zo.fmtSymtab(self),
        });
    }
    for (self.objects.items) |index| {
        const object = self.getFile(index).?.object;
        try w.print("object({d}) : {f} : has_debug({})", .{
            index,
            object.fmtPath(),
            object.hasDebugInfo(),
        });
        // "([*])" marks an input that is not alive, i.e. excluded from the link.
        if (!object.alive) try w.writeAll(" : ([*])");
        try w.writeByte('\n');
        try w.print("{f}{f}{f}{f}{f}\n", .{
            object.fmtAtoms(self),
            object.fmtCies(self),
            object.fmtFdes(self),
            object.fmtUnwindRecords(self),
            object.fmtSymtab(self),
        });
    }
    for (self.dylibs.items) |index| {
        const dylib = self.getFile(index).?.dylib;
        try w.print("dylib({d}) : {f} : needed({}) : weak({})", .{
            index,
            @as(Path, dylib.path),
            dylib.needed,
            dylib.weak,
        });
        if (!dylib.isAlive(self)) try w.writeAll(" : ([*])");
        try w.writeByte('\n');
        try w.print("{f}\n", .{dylib.fmtSymtab(self)});
    }
    if (self.getInternalObject()) |internal| {
        try w.print("internal({d}) : internal\n", .{internal.index});
        try w.print("{f}{f}\n", .{ internal.fmtAtoms(self), internal.fmtSymtab(self) });
    }
    try w.writeAll("thunks\n");
    for (self.thunks.items, 0..) |thunk, index| {
        try w.print("thunk({d}) : {f}\n", .{ index, thunk.fmt(self) });
    }
    try w.print("stubs\n{f}\n", .{self.stubs.fmt(self)});
    try w.print("objc_stubs\n{f}\n", .{self.objc_stubs.fmt(self)});
    try w.print("got\n{f}\n", .{self.got.fmt(self)});
    try w.print("tlv_ptr\n{f}\n", .{self.tlv_ptr.fmt(self)});
    try w.writeByte('\n');
    try w.print("sections\n{f}\n", .{self.fmtSections()});
    try w.print("segments\n{f}\n", .{self.fmtSegments()});
}
3970
/// Returns a formatting adapter that renders the section table
/// (see `formatSections`) when printed with `{f}`.
fn fmtSections(self: *MachO) std.fmt.Alt(*MachO, formatSections) {
    return .{ .data = self };
}
3974
/// Writes one line per output section: its index, owning segment, name,
/// address, file offset, alignment, size, and relocation info.
fn formatSections(self: *MachO, w: *Writer) Writer.Error!void {
    const headers = self.sections.items(.header);
    const segment_ids = self.sections.items(.segment_id);
    for (headers, segment_ids, 0..) |header, seg_id, sect_id| {
        try w.print(
            "sect({d}) : seg({d}) : {s},{s} : @{x} ({x}) : align({x}) : size({x}) : relocs({x};{d})\n",
            .{
                sect_id,
                seg_id,
                header.segName(),
                header.sectName(),
                header.addr,
                header.offset,
                header.@"align",
                header.size,
                header.reloff,
                header.nreloc,
            },
        );
    }
}
3987
/// Returns a formatting adapter that renders the segment table
/// (see `formatSegments`) when printed with `{f}`.
fn fmtSegments(self: *MachO) std.fmt.Alt(*MachO, formatSegments) {
    return .{ .data = self };
}
3991
/// Writes one line per segment: its index, name, VM range, and file range.
fn formatSegments(self: *MachO, w: *Writer) Writer.Error!void {
    for (self.segments.items, 0..) |segment, seg_id| {
        try w.print("seg({d}) : {s} : @{x}-{x} ({x}-{x})\n", .{
            seg_id,
            segment.segName(),
            segment.vmaddr,
            segment.vmaddr + segment.vmsize,
            segment.fileoff,
            segment.fileoff + segment.filesize,
        });
    }
}
4000
/// Returns a formatting adapter that renders a Mach-O section type constant
/// by name (see `formatSectType`) when printed with `{f}`.
pub fn fmtSectType(tt: u8) std.fmt.Alt(u8, formatSectType) {
    return .{ .data = tt };
}
4004
/// Writes the symbolic name of a Mach-O section type (`S_*` constant),
/// or "UNKNOWN(<hex>)" for values without a dedicated case.
fn formatSectType(tt: u8, w: *Writer) Writer.Error!void {
    const name = switch (tt) {
        macho.S_REGULAR => "REGULAR",
        macho.S_ZEROFILL => "ZEROFILL",
        macho.S_CSTRING_LITERALS => "CSTRING_LITERALS",
        macho.S_4BYTE_LITERALS => "4BYTE_LITERALS",
        macho.S_8BYTE_LITERALS => "8BYTE_LITERALS",
        macho.S_16BYTE_LITERALS => "16BYTE_LITERALS",
        macho.S_LITERAL_POINTERS => "LITERAL_POINTERS",
        macho.S_NON_LAZY_SYMBOL_POINTERS => "NON_LAZY_SYMBOL_POINTERS",
        macho.S_LAZY_SYMBOL_POINTERS => "LAZY_SYMBOL_POINTERS",
        macho.S_SYMBOL_STUBS => "SYMBOL_STUBS",
        macho.S_MOD_INIT_FUNC_POINTERS => "MOD_INIT_FUNC_POINTERS",
        macho.S_MOD_TERM_FUNC_POINTERS => "MOD_TERM_FUNC_POINTERS",
        macho.S_COALESCED => "COALESCED",
        macho.S_GB_ZEROFILL => "GB_ZEROFILL",
        macho.S_INTERPOSING => "INTERPOSING",
        macho.S_DTRACE_DOF => "DTRACE_DOF",
        macho.S_THREAD_LOCAL_REGULAR => "THREAD_LOCAL_REGULAR",
        macho.S_THREAD_LOCAL_ZEROFILL => "THREAD_LOCAL_ZEROFILL",
        macho.S_THREAD_LOCAL_VARIABLES => "THREAD_LOCAL_VARIABLES",
        macho.S_THREAD_LOCAL_VARIABLE_POINTERS => "THREAD_LOCAL_VARIABLE_POINTERS",
        macho.S_THREAD_LOCAL_INIT_FUNCTION_POINTERS => "THREAD_LOCAL_INIT_FUNCTION_POINTERS",
        macho.S_INIT_FUNC_OFFSETS => "INIT_FUNC_OFFSETS",
        else => |x| return w.print("UNKNOWN({x})", .{x}),
    };
    try w.print("{s}", .{name});
}
4033
/// Hot code updates are only attempted on macOS/Mac Catalyst hosts; the
/// implementation relies on Mach task APIs (see `ptraceAttach`/`MachTask`).
const is_hot_update_compatible = switch (builtin.target.os.tag) {
    .maccatalyst, .macos => true,
    else => false,
};

/// Default name of the entry point symbol.
const default_entry_symbol_name = "_main";
4040
/// Per-output-section bookkeeping: the Mach-O header, the owning segment,
/// and the atoms/thunks/relocations that will be emitted into it.
const Section = struct {
    header: macho.section_64,
    // Index into `segments`.
    segment_id: u8,
    // Atoms assigned to this section.
    atoms: std.ArrayList(Ref) = .empty,
    // Atom indices whose space may be reclaimed — presumably used by
    // incremental updates; TODO confirm.
    free_list: std.ArrayList(Atom.Index) = .empty,
    last_atom_index: Atom.Index = 0,
    // Thunks placed within this section.
    thunks: std.ArrayList(Thunk.Index) = .empty,
    // Staged output bytes for this section.
    out: std.ArrayList(u8) = .empty,
    // Relocations to be written alongside the section (relocatable output).
    relocs: std.ArrayList(macho.relocation_info) = .empty,
};
4051
/// Interning pool for literal data (e.g. cstrings and literal sections).
/// Each unique (type, bytes) literal gets a stable `Index`; the bytes live in
/// `data`, described by parallel `keys`, with the resolved symbol ref per
/// literal in `values`. `table` provides hashed lookup via `Adapter`.
pub const LiteralPool = struct {
    table: std.AutoArrayHashMapUnmanaged(void, void) = .empty,
    keys: std.ArrayList(Key) = .empty,
    values: std.ArrayList(MachO.Ref) = .empty,
    data: std.ArrayList(u8) = .empty,

    pub fn deinit(lp: *LiteralPool, allocator: Allocator) void {
        lp.table.deinit(allocator);
        lp.keys.deinit(allocator);
        lp.values.deinit(allocator);
        lp.data.deinit(allocator);
    }

    const InsertResult = struct {
        found_existing: bool,
        index: Index,
        /// Points into `values`; invalidated by subsequent inserts.
        ref: *MachO.Ref,
    };

    /// Returns the symbol ref assigned to literal `index`.
    /// Asserts the index is in bounds.
    pub fn getSymbolRef(lp: LiteralPool, index: Index) MachO.Ref {
        assert(index < lp.values.items.len);
        return lp.values.items[index];
    }

    /// Resolves literal `index` to its symbol. Asserts the ref resolves.
    pub fn getSymbol(lp: LiteralPool, index: Index, macho_file: *MachO) *Symbol {
        return lp.getSymbolRef(index).getSymbol(macho_file).?;
    }

    /// Interns `string` under hash seed `type`. The bytes are appended to
    /// `data` up front so the hash/eql adapter can read them during lookup;
    /// if an identical literal already exists, the speculative bytes are
    /// dropped again so repeated inserts do not grow `data` without bound.
    pub fn insert(lp: *LiteralPool, allocator: Allocator, @"type": u8, string: []const u8) !InsertResult {
        const size: u32 = @intCast(string.len);
        try lp.data.ensureUnusedCapacity(allocator, size);
        const off: u32 = @intCast(lp.data.items.len);
        lp.data.appendSliceAssumeCapacity(string);
        const adapter = Adapter{ .lp = lp };
        const key = Key{ .off = off, .size = size, .seed = @"type" };
        const gop = try lp.table.getOrPutAdapted(allocator, key, adapter);
        if (gop.found_existing) {
            // The literal was already interned; no stored key references the
            // bytes we just appended, so shrink them away again.
            lp.data.shrinkRetainingCapacity(off);
        } else {
            try lp.keys.append(allocator, key);
            _ = try lp.values.addOne(allocator);
        }
        return .{
            .found_existing = gop.found_existing,
            .index = @intCast(gop.index),
            .ref = &lp.values.items[gop.index],
        };
    }

    const Key = struct {
        /// Byte offset into `data`.
        off: u32,
        /// Length in bytes.
        size: u32,
        /// Hash seed; callers pass the literal's section type here.
        seed: u8,

        fn getData(key: Key, lp: *const LiteralPool) []const u8 {
            return lp.data.items[key.off..][0..key.size];
        }

        fn eql(key: Key, other: Key, lp: *const LiteralPool) bool {
            const key_data = key.getData(lp);
            const other_data = other.getData(lp);
            return mem.eql(u8, key_data, other_data);
        }

        fn hash(key: Key, lp: *const LiteralPool) u32 {
            const data = key.getData(lp);
            return @truncate(Hash.hash(key.seed, data));
        }
    };

    const Adapter = struct {
        lp: *const LiteralPool,

        pub fn eql(ctx: @This(), key: Key, b_void: void, b_map_index: usize) bool {
            _ = b_void;
            const other = ctx.lp.keys.items[b_map_index];
            return key.eql(other, ctx.lp);
        }

        pub fn hash(ctx: @This(), key: Key) u32 {
            return key.hash(ctx.lp);
        }
    };

    pub const Index = u32;
};
4136
/// Runtime state for hot code updates: the Mach task of the attached
/// process, set by `ptraceAttach` and cleared by `ptraceDetach`.
const HotUpdateState = struct {
    mach_task: ?MachTask = null,
};
4140
/// Layout bookkeeping for composing the output symbol table: `i*` fields are
/// starting indices and `n*` fields are counts for each symbol class
/// (locals, stabs, exports, imports), plus the string table offset/size.
/// NOTE(review): field semantics inferred from names — confirm against users.
pub const SymtabCtx = struct {
    ilocal: u32 = 0,
    istab: u32 = 0,
    iexport: u32 = 0,
    iimport: u32 = 0,
    nlocals: u32 = 0,
    nstabs: u32 = 0,
    nexports: u32 = 0,
    nimports: u32 = 0,
    stroff: u32 = 0,
    strsize: u32 = 0,
};
4153
/// An `nlist_64` with every field zeroed.
pub const null_sym = macho.nlist_64{
    .n_strx = 0,
    .n_type = @bitCast(@as(u8, 0)),
    .n_sect = 0,
    .n_desc = @bitCast(@as(u16, 0)),
    .n_value = 0,
};
4161
/// A platform requirement extracted from (or destined for) a Mach-O version
/// load command: target OS, ABI flavor (e.g. simulator), and minimum version.
pub const Platform = struct {
    os_tag: std.Target.Os.Tag,
    abi: std.Target.Abi,
    version: std.SemanticVersion,

    /// Using Apple's ld64 as our blueprint, `min_version` as well as `sdk_version` are set to
    /// the extracted minimum platform version.
    /// Handles both the modern LC_BUILD_VERSION and the legacy VERSION_MIN_*
    /// commands; any other command is a programmer error (unreachable).
    pub fn fromLoadCommand(lc: macho.LoadCommandIterator.LoadCommand) Platform {
        switch (lc.hdr.cmd) {
            .BUILD_VERSION => {
                const cmd = lc.cast(macho.build_version_command).?;
                return .{
                    .os_tag = switch (cmd.platform) {
                        .DRIVERKIT => .driverkit,
                        .IOS, .IOSSIMULATOR => .ios,
                        .MACCATALYST => .maccatalyst,
                        .MACOS => .macos,
                        .TVOS, .TVOSSIMULATOR => .tvos,
                        .VISIONOS, .VISIONOSSIMULATOR => .visionos,
                        .WATCHOS, .WATCHOSSIMULATOR => .watchos,
                        else => @panic("TODO"),
                    },
                    .abi = switch (cmd.platform) {
                        .IOSSIMULATOR,
                        .TVOSSIMULATOR,
                        .VISIONOSSIMULATOR,
                        .WATCHOSSIMULATOR,
                        => .simulator,
                        else => .none,
                    },
                    .version = appleVersionToSemanticVersion(cmd.minos),
                };
            },
            .VERSION_MIN_IPHONEOS,
            .VERSION_MIN_MACOSX,
            .VERSION_MIN_TVOS,
            .VERSION_MIN_WATCHOS,
            => {
                // We can't distinguish Mac Catalyst here, but this is legacy stuff anyway.
                const cmd = lc.cast(macho.version_min_command).?;
                return .{
                    .os_tag = switch (lc.hdr.cmd) {
                        .VERSION_MIN_IPHONEOS => .ios,
                        .VERSION_MIN_MACOSX => .macos,
                        .VERSION_MIN_TVOS => .tvos,
                        .VERSION_MIN_WATCHOS => .watchos,
                        else => unreachable,
                    },
                    .abi = .none,
                    .version = appleVersionToSemanticVersion(cmd.version),
                };
            },
            else => unreachable,
        }
    }

    /// Derives the platform from a compilation target, taking the minimum of
    /// the target's semantic version range as the platform version.
    pub fn fromTarget(target: *const std.Target) Platform {
        return .{
            .os_tag = target.os.tag,
            .abi = target.abi,
            .version = target.os.version_range.semver.min,
        };
    }

    /// Packs the version into Apple's xxxxyyzz encoding
    /// (see `semanticVersionToAppleVersion`).
    pub fn toAppleVersion(plat: Platform) u32 {
        return semanticVersionToAppleVersion(plat.version);
    }

    /// Maps (os_tag, abi) to the corresponding macho.PLATFORM constant.
    /// Asserts the OS is one of the Apple platforms handled here.
    pub fn toApplePlatform(plat: Platform) macho.PLATFORM {
        return switch (plat.os_tag) {
            .driverkit => .DRIVERKIT,
            .ios => if (plat.abi == .simulator) .IOSSIMULATOR else .IOS,
            .maccatalyst => .MACCATALYST,
            .macos => .MACOS,
            .tvos => if (plat.abi == .simulator) .TVOSSIMULATOR else .TVOS,
            .visionos => if (plat.abi == .simulator) .VISIONOSSIMULATOR else .VISIONOS,
            .watchos => if (plat.abi == .simulator) .WATCHOSSIMULATOR else .WATCHOS,
            else => unreachable,
        };
    }

    /// Whether the platform version is high enough to be encoded with
    /// LC_BUILD_VERSION (third column of `supported_platforms`).
    pub fn isBuildVersionCompatible(plat: Platform) bool {
        inline for (supported_platforms) |sup_plat| {
            if (sup_plat[0] == plat.os_tag and sup_plat[1] == plat.abi) {
                return sup_plat[2] <= plat.toAppleVersion();
            }
        }
        return false;
    }

    /// Whether the platform version is high enough for the legacy
    /// VERSION_MIN_* commands (fourth column of `supported_platforms`).
    pub fn isVersionMinCompatible(plat: Platform) bool {
        inline for (supported_platforms) |sup_plat| {
            if (sup_plat[0] == plat.os_tag and sup_plat[1] == plat.abi) {
                return sup_plat[3] <= plat.toAppleVersion();
            }
        }
        return false;
    }

    /// Returns a formatting adapter printing "<arch>-<os>[-<abi>]".
    pub fn fmtTarget(plat: Platform, cpu_arch: std.Target.Cpu.Arch) std.fmt.Alt(Format, Format.target) {
        return .{ .data = .{ .platform = plat, .cpu_arch = cpu_arch } };
    }

    const Format = struct {
        platform: Platform,
        cpu_arch: std.Target.Cpu.Arch,

        pub fn target(f: Format, w: *Writer) Writer.Error!void {
            try w.print("{s}-{s}", .{ @tagName(f.cpu_arch), @tagName(f.platform.os_tag) });
            if (f.platform.abi != .none) {
                try w.print("-{s}", .{@tagName(f.platform.abi)});
            }
        }
    };

    /// Caller owns the memory.
    pub fn allocPrintTarget(plat: Platform, gpa: Allocator, cpu_arch: std.Target.Cpu.Arch) error{OutOfMemory}![]u8 {
        // Print straight into a fresh allocation instead of going through a
        // temporary managed ArrayList and its writer.
        return std.fmt.allocPrint(gpa, "{f}", .{plat.fmtTarget(cpu_arch)});
    }

    /// Equality on (os_tag, abi) only; versions are intentionally ignored.
    pub fn eqlTarget(plat: Platform, other: Platform) bool {
        return plat.os_tag == other.os_tag and plat.abi == other.abi;
    }
};
4289
/// Row type for `supported_platforms`. Version columns use Apple's packed
/// xxxxyyzz encoding (see `semanticVersionToAppleVersion`).
const SupportedPlatforms = struct {
    std.Target.Os.Tag,
    std.Target.Abi,
    u32, // Min platform version for which to emit LC_BUILD_VERSION
    u32, // Min supported platform version
};
4296
4297// Source: https://github.com/apple-oss-distributions/ld64/blob/59a99ab60399c5e6c49e6945a9e1049c42b71135/src/ld/PlatformSupport.cpp#L52
4298// zig fmt: off
// Columns: os, abi, min version for LC_BUILD_VERSION, min supported version.
const supported_platforms = [_]SupportedPlatforms{
    .{ .driverkit, .none, 0x130000, 0x130000 },
    .{ .ios, .none, 0x0C0000, 0x070000 },
    .{ .ios, .simulator, 0x0D0000, 0x080000 },
    .{ .maccatalyst, .none, 0x0D0000, 0x0D0000 },
    .{ .macos, .none, 0x0A0E00, 0x0A0800 },
    .{ .tvos, .none, 0x0C0000, 0x070000 },
    .{ .tvos, .simulator, 0x0D0000, 0x080000 },
    .{ .visionos, .none, 0x010000, 0x010000 },
    .{ .visionos, .simulator, 0x010000, 0x010000 },
    .{ .watchos, .none, 0x050000, 0x020000 },
    .{ .watchos, .simulator, 0x060000, 0x020000 },
};
4312// zig fmt: on
4313
/// Packs a semantic version into Apple's u32 encoding: major in the high
/// 16 bits, minor in the next 8, patch in the low 8 (xxxxyyzz).
/// Asserts (via @intCast safety) that each component fits a u32.
pub inline fn semanticVersionToAppleVersion(version: std.SemanticVersion) u32 {
    const major: u32 = @intCast(version.major);
    const minor: u32 = @intCast(version.minor);
    const patch: u32 = @intCast(version.patch);
    return (major << 16) | (minor << 8) | patch;
}
4320
/// Unpacks Apple's xxxxyyzz u32 version encoding into a semantic version:
/// high 16 bits = major, next 8 = minor, low 8 = patch.
pub inline fn appleVersionToSemanticVersion(version: u32) std.SemanticVersion {
    const major: u16 = @truncate(version >> 16);
    const minor: u8 = @truncate(version >> 8);
    const patch: u8 = @truncate(version);
    return .{ .major = major, .minor = minor, .patch = patch };
}
4328
/// Determines the Darwin SDK version: first from SDKSettings.json in the SDK
/// root, then as a fallback by scraping digits out of the SDK directory name.
/// Returns null when no version can be determined.
fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersion {
    const gpa = comp.gpa;

    var arena_state = std.heap.ArenaAllocator.init(gpa);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    const sdk_dir = switch (sdk_layout) {
        .sdk => comp.sysroot.?,
        .vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null,
    };
    if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| {
        return parseSdkVersion(ver);
    } else |_| {
        // Read from settings should always succeed when vendored.
        // TODO: convert to fatal linker error
        if (sdk_layout == .vendored) @panic("zig installation bug: unable to parse SDK version");
    }

    // Fall back: extract the leading run of digits and dots from the
    // directory stem, e.g. "MacOSX13.4" -> "13.4".
    const stem = fs.path.stem(sdk_dir);
    const start = std.mem.indexOfAny(u8, stem, "0123456789") orelse stem.len;
    var end = start;
    while (end < stem.len and (std.ascii.isDigit(stem[end]) or stem[end] == '.')) end += 1;
    return parseSdkVersion(stem[start..end]);
}
4359
// Official Apple SDKs ship with a `SDKSettings.json` located at the top of SDK fs layout.
// Use property `MinimalDisplayName` to determine version.
// The file/property is also available with vendored libc.
fn readSdkVersionFromSettings(arena: Allocator, dir: []const u8) ![]const u8 {
    const settings_path = try fs.path.join(arena, &.{ dir, "SDKSettings.json" });
    const raw = try fs.cwd().readFileAlloc(settings_path, arena, .limited(std.math.maxInt(u16)));
    const json = try std.json.parseFromSlice(std.json.Value, arena, raw, .{});
    const ver = json.value.object.get("MinimalDisplayName") orelse return error.SdkVersionFailure;
    return ver.string;
}
4370
// Versions reported by Apple aren't exactly semantically valid as they usually omit
// the patch component, so we parse SDK value by hand.
// Returns null unless both major and minor parse; a missing or malformed
// patch component defaults to 0.
fn parseSdkVersion(raw: []const u8) ?std.SemanticVersion {
    const parseComponent = struct {
        fn parse(maybe_text: ?[]const u8) ?u16 {
            const text = maybe_text orelse return null;
            return std.fmt.parseInt(u16, text, 10) catch null;
        }
    }.parse;

    var it = std.mem.splitAny(u8, raw, ".");
    const major = parseComponent(it.next()) orelse return null;
    const minor = parseComponent(it.next()) orelse return null;
    return .{
        .major = major,
        .minor = minor,
        .patch = parseComponent(it.next()) orelse 0,
    };
}
4393
/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
const ideal_factor = 3;

/// In order for a slice of bytes to be considered eligible to keep metadata pointing at
/// it as a possible place to put new symbols, it must have enough room for this many bytes
/// (plus extra for reserved capacity).
const minimum_text_block_size = 64;
/// Minimum padded capacity of a text block; `padToIdeal` is defined elsewhere in this file.
pub const min_text_capacity = padToIdeal(minimum_text_block_size);

/// Default virtual memory offset corresponds to the size of __PAGEZERO segment and
/// start of __TEXT segment.
pub const default_pagezero_size: u64 = 0x100000000;

/// We commit 0x1000 = 4096 bytes of space to the header and
/// the table of load commands. This should be plenty for any
/// potential future extensions.
pub const default_headerpad_size: u32 = 0x1000;
4412
/// An input library/object handed to the linker, with its per-input flags.
const SystemLib = struct {
    path: Path,
    needed: bool = false,
    weak: bool = false,
    hidden: bool = false,
    reexport: bool = false,
    must_link: bool = false,

    /// Translates a generic `link.Input` into a SystemLib.
    /// `.dso_exact` and `.res` are treated as unreachable — presumably they
    /// are filtered out before reaching the MachO backend; TODO confirm.
    fn fromLinkInput(link_input: link.Input) SystemLib {
        return switch (link_input) {
            .dso_exact => unreachable,
            .res => unreachable,
            .object, .archive => |obj| .{
                .path = obj.path,
                .must_link = obj.must_link,
                .hidden = obj.hidden,
            },
            .dso => |dso| .{
                .path = dso.path,
                .needed = dso.needed,
                .weak = dso.weak,
                .reexport = dso.reexport,
            },
        };
    }
};
4439
4440pub const SdkLayout = std.zig.LibCDirs.DarwinSdkLayout;
4441
/// How undefined symbols are treated at the end of resolution.
const UndefinedTreatment = enum {
    @"error",
    warn,
    suppress,
    dynamic_lookup,
};
4448
/// A reference to atom or symbol in an input file.
/// If file == 0, symbol is an undefined global.
pub const Ref = struct {
    index: u32,
    file: File.Index,

    /// Two refs are equal when both the file and the intra-file index match.
    pub fn eql(ref: Ref, other: Ref) bool {
        if (ref.file != other.file) return false;
        return ref.index == other.index;
    }

    /// Total order: by file index first, then by intra-file index.
    pub fn lessThan(ref: Ref, other: Ref) bool {
        if (ref.file != other.file) return ref.file < other.file;
        return ref.index < other.index;
    }

    /// Resolves the owning file, or null for the reserved null file slot.
    pub fn getFile(ref: Ref, macho_file: *MachO) ?File {
        return macho_file.getFile(ref.file);
    }

    /// Resolves to the referenced atom, or null if the file does not resolve.
    pub fn getAtom(ref: Ref, macho_file: *MachO) ?*Atom {
        const file = ref.getFile(macho_file) orelse return null;
        return file.getAtom(ref.index);
    }

    /// Resolves to the referenced symbol, or null if the file does not resolve.
    pub fn getSymbol(ref: Ref, macho_file: *MachO) ?*Symbol {
        const file = ref.getFile(macho_file) orelse return null;
        switch (file) {
            inline else => |x| return &x.symbols.items[ref.index],
        }
    }

    pub fn format(ref: Ref, bw: *Writer) Writer.Error!void {
        try bw.print("%{d} in file({d})", .{ ref.index, ref.file });
    }
};
4486
/// Global symbol resolution table. Symbols are deduplicated by NAME (see
/// `Key.eql`/`Key.hash`): each unique name gets one slot in `keys`/`values`,
/// where the value is the currently-winning definition's `Ref`.
/// Indices handed out are 1-based; 0 is reserved to mean "no symbol"
/// (see `get` and the `gop.index + 1` below).
pub const SymbolResolver = struct {
    keys: std.ArrayList(Key) = .empty,
    values: std.ArrayList(Ref) = .empty,
    // Hash table over `keys`; the actual key/value data lives in the arrays above.
    table: std.AutoArrayHashMapUnmanaged(void, void) = .empty,

    const Result = struct {
        found_existing: bool,
        index: Index,
        // Points into `values`; invalidated by subsequent getOrPut calls.
        ref: *Ref,
    };

    pub fn deinit(resolver: *SymbolResolver, allocator: Allocator) void {
        resolver.keys.deinit(allocator);
        resolver.values.deinit(allocator);
        resolver.table.deinit(allocator);
    }

    /// Looks up (or creates) the slot for the symbol NAME that `ref` points
    /// at. On first insertion the value slot is left uninitialized for the
    /// caller to fill via `Result.ref`. Returned index is 1-based.
    pub fn getOrPut(
        resolver: *SymbolResolver,
        allocator: Allocator,
        ref: Ref,
        macho_file: *MachO,
    ) !Result {
        const adapter = Adapter{ .keys = resolver.keys.items, .macho_file = macho_file };
        const key = Key{ .index = ref.index, .file = ref.file };
        const gop = try resolver.table.getOrPutAdapted(allocator, key, adapter);
        if (!gop.found_existing) {
            try resolver.keys.append(allocator, key);
            _ = try resolver.values.addOne(allocator);
        }
        return .{
            .found_existing = gop.found_existing,
            // Shift by one so that 0 can serve as the "null" index.
            .index = @intCast(gop.index + 1),
            .ref = &resolver.values.items[gop.index],
        };
    }

    /// Returns the resolved ref for a 1-based index; 0 yields null.
    pub fn get(resolver: SymbolResolver, index: Index) ?Ref {
        if (index == 0) return null;
        return resolver.values.items[index - 1];
    }

    /// Clears all entries but keeps allocated capacity for reuse.
    pub fn reset(resolver: *SymbolResolver) void {
        resolver.keys.clearRetainingCapacity();
        resolver.values.clearRetainingCapacity();
        resolver.table.clearRetainingCapacity();
    }

    const Key = struct {
        index: Symbol.Index,
        file: File.Index,

        fn getName(key: Key, macho_file: *MachO) [:0]const u8 {
            const ref = Ref{ .index = key.index, .file = key.file };
            return ref.getSymbol(macho_file).?.getName(macho_file);
        }

        pub fn getFile(key: Key, macho_file: *MachO) ?File {
            const ref = Ref{ .index = key.index, .file = key.file };
            return ref.getFile(macho_file);
        }

        // Keys compare equal when their symbols' names match, regardless of file.
        fn eql(key: Key, other: Key, macho_file: *MachO) bool {
            const key_name = key.getName(macho_file);
            const other_name = other.getName(macho_file);
            return mem.eql(u8, key_name, other_name);
        }

        fn hash(key: Key, macho_file: *MachO) u32 {
            const name = key.getName(macho_file);
            return @truncate(Hash.hash(0, name));
        }
    };

    const Adapter = struct {
        keys: []const Key,
        macho_file: *MachO,

        pub fn eql(ctx: @This(), key: Key, b_void: void, b_map_index: usize) bool {
            _ = b_void;
            const other = ctx.keys[b_map_index];
            return key.eql(other, ctx.macho_file);
        }

        pub fn hash(ctx: @This(), key: Key) u32 {
            return key.hash(ctx.macho_file);
        }
    };

    pub const Index = u32;
};
4578
/// A compact (position, length) pair referencing a range in a backing
/// string buffer — presumably a string table; TODO confirm against users.
pub const String = struct {
    pos: u32 = 0,
    len: u32 = 0,
};
4583
/// Records why a symbol is undefined: a synthetic requirement (forced
/// undefined, entry point, dyld_stub_binder, objc_msgsend) or the explicit
/// list of refs that mention it. Used to populate `undefs` diagnostics.
pub const UndefRefs = union(enum) {
    force_undefined,
    entry,
    dyld_stub_binder,
    objc_msgsend,
    refs: std.ArrayList(Ref),

    pub fn deinit(self: *UndefRefs, allocator: Allocator) void {
        // Only the `refs` variant owns heap memory.
        switch (self.*) {
            .refs => |*refs| refs.deinit(allocator),
            else => {},
        }
    }
};
4598
/// Error set for the Mach kernel-call wrappers in `MachTask`.
pub const MachError = error{
    /// Not enough permissions held to perform the requested kernel
    /// call.
    PermissionDenied,
} || std.posix.UnexpectedError;
4604
4605pub const MachTask = extern struct {
4606 port: std.c.mach_port_name_t,
4607
    /// A task handle is valid when its port is not TASK_NULL.
    pub fn isValid(self: MachTask) bool {
        return self.port != std.c.TASK_NULL;
    }
4611
    /// Returns the pid of the process owning this Mach task.
    /// Mach FAILURE maps to error.PermissionDenied; any other kernel error
    /// is surfaced via `unexpectedKernError`.
    pub fn pidForTask(self: MachTask) MachError!std.c.pid_t {
        var pid: std.c.pid_t = undefined;
        switch (getKernError(std.c.pid_for_task(self.port, &pid))) {
            .SUCCESS => return pid,
            .FAILURE => return error.PermissionDenied,
            else => |err| return unexpectedKernError(err),
        }
    }
4620
    /// Allocates a new port with the given right in this task's IPC space
    /// and returns it wrapped as a MachTask.
    pub fn allocatePort(self: MachTask, right: std.c.MACH_PORT_RIGHT) MachError!MachTask {
        var out_port: std.c.mach_port_name_t = undefined;
        switch (getKernError(std.c.mach_port_allocate(
            self.port,
            @intFromEnum(right),
            &out_port,
        ))) {
            .SUCCESS => return .{ .port = out_port },
            .FAILURE => return error.PermissionDenied,
            else => |err| return unexpectedKernError(err),
        }
    }
4633
    /// Releases `port` from this task's IPC space.
    /// Any kernel error is deliberately discarded (deallocation must not fail).
    pub fn deallocatePort(self: MachTask, port: MachTask) void {
        _ = getKernError(std.c.mach_port_deallocate(self.port, port.port));
    }
4637
    /// Inserts a send/receive right for `port` into this task's IPC space,
    /// registering it under the same name (port.port is passed as both the
    /// name and the right).
    pub fn insertRight(self: MachTask, port: MachTask, msg: std.c.MACH_MSG_TYPE) !void {
        switch (getKernError(std.c.mach_port_insert_right(
            self.port,
            port.port,
            port.port,
            @intFromEnum(msg),
        ))) {
            .SUCCESS => return,
            .FAILURE => return error.PermissionDenied,
            else => |err| return unexpectedKernError(err),
        }
    }
4650
    /// Result of `getExceptionPorts`: parallel arrays of exception masks,
    /// ports, behaviors, and thread-state flavors; `count` holds how many
    /// entries the kernel filled in.
    pub const PortInfo = struct {
        mask: std.c.exception_mask_t,
        masks: [std.c.EXC.TYPES_COUNT]std.c.exception_mask_t,
        ports: [std.c.EXC.TYPES_COUNT]std.c.mach_port_t,
        behaviors: [std.c.EXC.TYPES_COUNT]std.c.exception_behavior_t,
        flavors: [std.c.EXC.TYPES_COUNT]std.c.thread_state_flavor_t,
        count: std.c.mach_msg_type_number_t,
    };
4659
4660 pub fn getExceptionPorts(self: MachTask, mask: std.c.exception_mask_t) !PortInfo {
4661 var info: PortInfo = .{
4662 .mask = mask,
4663 .masks = undefined,
4664 .ports = undefined,
4665 .behaviors = undefined,
4666 .flavors = undefined,
4667 .count = 0,
4668 };
4669 info.count = info.ports.len / @sizeOf(std.c.mach_port_t);
4670
4671 switch (getKernError(std.c.task_get_exception_ports(
4672 self.port,
4673 info.mask,
4674 &info.masks,
4675 &info.count,
4676 &info.ports,
4677 &info.behaviors,
4678 &info.flavors,
4679 ))) {
4680 .SUCCESS => return info,
4681 .FAILURE => return error.PermissionDenied,
4682 else => |err| return unexpectedKernError(err),
4683 }
4684 }
4685
4686 pub fn setExceptionPorts(
4687 self: MachTask,
4688 mask: std.c.exception_mask_t,
4689 new_port: MachTask,
4690 behavior: std.c.exception_behavior_t,
4691 new_flavor: std.c.thread_state_flavor_t,
4692 ) !void {
4693 switch (getKernError(std.c.task_set_exception_ports(
4694 self.port,
4695 mask,
4696 new_port.port,
4697 behavior,
4698 new_flavor,
4699 ))) {
4700 .SUCCESS => return,
4701 .FAILURE => return error.PermissionDenied,
4702 else => |err| return unexpectedKernError(err),
4703 }
4704 }
4705
    /// Result of `getRegionInfo`: one of three `mach_vm_region` info flavors.
    pub const RegionInfo = struct {
        /// Selects which info flavor was requested.
        pub const Tag = enum {
            basic,
            extended,
            top,
        };

        /// Start address of the region actually found; the kernel may round
        /// this to the start of the containing region.
        base_addr: u64,
        /// Which member of `info` is active.
        tag: Tag,
        /// Untagged payload; only the member matching `tag` is valid.
        info: union {
            basic: std.c.vm_region_basic_info_64,
            extended: std.c.vm_region_extended_info,
            top: std.c.vm_region_top_info,
        },
    };
4721
    /// Queries `mach_vm_region` for information about the memory region
    /// containing `address` in this task's address space. The flavor of the
    /// information returned is selected by `tag`.
    pub fn getRegionInfo(
        task: MachTask,
        address: u64,
        len: usize,
        tag: RegionInfo.Tag,
    ) MachError!RegionInfo {
        var info: RegionInfo = .{
            .base_addr = address,
            .tag = tag,
            .info = undefined,
        };
        // Activate the union member matching the requested flavor so the
        // pointer passed to the kernel below refers to the right payload.
        switch (tag) {
            .basic => info.info = .{ .basic = undefined },
            .extended => info.info = .{ .extended = undefined },
            .top => info.info = .{ .top = undefined },
        }
        // NOTE(review): a length of 1 is bumped to 2 — presumably working
        // around a kernel quirk with minimal-size queries; confirm.
        var base_len: std.c.mach_vm_size_t = if (len == 1) 2 else len;
        var objname: std.c.mach_port_t = undefined;
        // In/out: capacity of the info buffer in natural-size units; the
        // kernel overwrites it with the amount actually returned.
        var count: std.c.mach_msg_type_number_t = switch (tag) {
            .basic => std.c.VM.REGION.BASIC_INFO_COUNT,
            .extended => std.c.VM.REGION.EXTENDED_INFO_COUNT,
            .top => std.c.VM.REGION.TOP_INFO_COUNT,
        };
        switch (getKernError(std.c.mach_vm_region(
            task.port,
            &info.base_addr,
            &base_len,
            switch (tag) {
                .basic => std.c.VM.REGION.BASIC_INFO_64,
                .extended => std.c.VM.REGION.EXTENDED_INFO,
                .top => std.c.VM.REGION.TOP_INFO,
            },
            switch (tag) {
                .basic => @as(std.c.vm_region_info_t, @ptrCast(&info.info.basic)),
                .extended => @as(std.c.vm_region_info_t, @ptrCast(&info.info.extended)),
                .top => @as(std.c.vm_region_info_t, @ptrCast(&info.info.top)),
            },
            &count,
            &objname,
        ))) {
            .SUCCESS => return info,
            .FAILURE => return error.PermissionDenied,
            else => |err| return unexpectedKernError(err),
        }
    }
4767
    /// Result of `getRegionSubmapInfo`: short or full submap region info.
    pub const RegionSubmapInfo = struct {
        /// Selects which submap info flavor was requested.
        pub const Tag = enum {
            short,
            full,
        };

        /// Which member of `info` is active.
        tag: Tag,
        /// Start address of the region actually found; written back by the
        /// kernel during the query.
        base_addr: u64,
        /// Untagged payload; only the member matching `tag` is valid.
        info: union {
            short: std.c.vm_region_submap_short_info_64,
            full: std.c.vm_region_submap_info_64,
        },
    };
4781
    /// Queries `mach_vm_region_recurse` for submap information about the
    /// memory region containing `address`, descending at most
    /// `nesting_depth` levels into submaps.
    pub fn getRegionSubmapInfo(
        task: MachTask,
        address: u64,
        len: usize,
        nesting_depth: u32,
        tag: RegionSubmapInfo.Tag,
    ) MachError!RegionSubmapInfo {
        var info: RegionSubmapInfo = .{
            .base_addr = address,
            .tag = tag,
            .info = undefined,
        };
        // Activate the union member matching the requested flavor so the
        // pointer passed to the kernel below refers to the right payload.
        switch (tag) {
            .short => info.info = .{ .short = undefined },
            .full => info.info = .{ .full = undefined },
        }
        // The kernel writes back the depth it actually descended to.
        var nesting = nesting_depth;
        // NOTE(review): a length of 1 is bumped to 2 — presumably working
        // around a kernel quirk with minimal-size queries; confirm.
        var base_len: std.c.mach_vm_size_t = if (len == 1) 2 else len;
        // In/out: capacity of the info buffer; overwritten by the kernel.
        var count: std.c.mach_msg_type_number_t = switch (tag) {
            .short => std.c.VM.REGION.SUBMAP_SHORT_INFO_COUNT_64,
            .full => std.c.VM.REGION.SUBMAP_INFO_COUNT_64,
        };
        switch (getKernError(std.c.mach_vm_region_recurse(
            task.port,
            &info.base_addr,
            &base_len,
            &nesting,
            switch (tag) {
                .short => @as(std.c.vm_region_recurse_info_t, @ptrCast(&info.info.short)),
                .full => @as(std.c.vm_region_recurse_info_t, @ptrCast(&info.info.full)),
            },
            &count,
        ))) {
            .SUCCESS => return info,
            .FAILURE => return error.PermissionDenied,
            else => |err| return unexpectedKernError(err),
        }
    }
4820
4821 pub fn getCurrProtection(task: MachTask, address: u64, len: usize) MachError!std.c.vm_prot_t {
4822 const info = try task.getRegionSubmapInfo(address, len, 0, .short);
4823 return info.info.short.protection;
4824 }
4825
    /// Sets the *maximum* VM protection for `[address, address+len)`;
    /// delegates to `setProtectionImpl` with `set_max = true`.
    pub fn setMaxProtection(task: MachTask, address: u64, len: usize, prot: std.c.vm_prot_t) MachError!void {
        return task.setProtectionImpl(address, len, true, prot);
    }
4829
    /// Sets the *current* VM protection for `[address, address+len)`;
    /// delegates to `setProtectionImpl` with `set_max = false`.
    pub fn setCurrProtection(task: MachTask, address: u64, len: usize, prot: std.c.vm_prot_t) MachError!void {
        return task.setProtectionImpl(address, len, false, prot);
    }
4833
4834 fn setProtectionImpl(task: MachTask, address: u64, len: usize, set_max: bool, prot: std.c.vm_prot_t) MachError!void {
4835 switch (getKernError(std.c.mach_vm_protect(task.port, address, len, @intFromBool(set_max), prot))) {
4836 .SUCCESS => return,
4837 .FAILURE => return error.PermissionDenied,
4838 else => |err| return unexpectedKernError(err),
4839 }
4840 }
4841
4842 /// Will write to VM even if current protection attributes specifically prohibit
4843 /// us from doing so, by temporarily setting protection level to a level with VM_PROT_COPY
4844 /// variant, and resetting after a successful or unsuccessful write.
4845 pub fn writeMemProtected(task: MachTask, address: u64, buf: []const u8, arch: std.Target.Cpu.Arch) MachError!usize {
4846 const curr_prot = try task.getCurrProtection(address, buf.len);
4847 try task.setCurrProtection(
4848 address,
4849 buf.len,
4850 std.c.PROT.READ | std.c.PROT.WRITE | std.c.PROT.COPY,
4851 );
4852 defer {
4853 task.setCurrProtection(address, buf.len, curr_prot) catch {};
4854 }
4855 return task.writeMem(address, buf, arch);
4856 }
4857
    /// Writes `buf` into the task's memory at `address`, page by page, so
    /// that no single `mach_vm_write` crosses a page boundary. On aarch64,
    /// the cache for each written range is flushed afterwards so newly
    /// written code becomes visible. Returns the number of bytes written.
    pub fn writeMem(task: MachTask, address: u64, buf: []const u8, arch: std.Target.Cpu.Arch) MachError!usize {
        const count = buf.len;
        var total_written: usize = 0;
        var curr_addr = address;
        const page_size = try MachTask.getPageSize(task); // TODO we probably can assume value here
        var out_buf = buf[0..];

        while (total_written < count) {
            // Clamp this write to the remainder of the current page.
            const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_written);
            switch (getKernError(std.c.mach_vm_write(
                task.port,
                curr_addr,
                @intFromPtr(out_buf.ptr),
                @as(std.c.mach_msg_type_number_t, @intCast(curr_size)),
            ))) {
                .SUCCESS => {},
                .FAILURE => return error.PermissionDenied,
                else => |err| return unexpectedKernError(err),
            }

            switch (arch) {
                .aarch64 => {
                    // Flush the cache over the just-written range.
                    var mattr_value: std.c.vm_machine_attribute_val_t = std.c.MATTR.VAL_CACHE_FLUSH;
                    switch (getKernError(std.c.vm_machine_attribute(
                        task.port,
                        curr_addr,
                        curr_size,
                        std.c.MATTR.CACHE,
                        &mattr_value,
                    ))) {
                        .SUCCESS => {},
                        .FAILURE => return error.PermissionDenied,
                        else => |err| return unexpectedKernError(err),
                    }
                },
                .x86_64 => {},
                // Only aarch64 and x86_64 are supported targets here.
                else => unreachable,
            }

            out_buf = out_buf[curr_size..];
            total_written += curr_size;
            curr_addr += curr_size;
        }

        return total_written;
    }
4904
    /// Reads `buf.len` bytes from the task's memory at `address` into `buf`,
    /// page by page. Each `mach_vm_read` hands back a kernel-allocated
    /// buffer which is copied out and immediately deallocated. Returns the
    /// number of bytes read.
    pub fn readMem(task: MachTask, address: u64, buf: []u8) MachError!usize {
        const count = buf.len;
        var total_read: usize = 0;
        var curr_addr = address;
        const page_size = try MachTask.getPageSize(task); // TODO we probably can assume value here
        var out_buf = buf[0..];

        while (total_read < count) {
            // Clamp this read to the remainder of the current page.
            const curr_size = maxBytesLeftInPage(page_size, curr_addr, count - total_read);
            var curr_bytes_read: std.c.mach_msg_type_number_t = 0;
            var vm_memory: std.c.vm_offset_t = undefined;
            switch (getKernError(std.c.mach_vm_read(task.port, curr_addr, curr_size, &vm_memory, &curr_bytes_read))) {
                .SUCCESS => {},
                .FAILURE => return error.PermissionDenied,
                else => |err| return unexpectedKernError(err),
            }

            // Copy out of the kernel-allocated buffer, then release it in
            // our own address space.
            @memcpy(out_buf[0..curr_bytes_read], @as([*]const u8, @ptrFromInt(vm_memory)));
            _ = std.c.vm_deallocate(std.c.mach_task_self(), vm_memory, curr_bytes_read);

            out_buf = out_buf[curr_bytes_read..];
            curr_addr += curr_bytes_read;
            total_read += curr_bytes_read;
        }

        return total_read;
    }
4932
4933 fn maxBytesLeftInPage(page_size: usize, address: u64, count: usize) usize {
4934 var left = count;
4935 if (page_size > 0) {
4936 const page_offset = address % page_size;
4937 const bytes_left_in_page = page_size - page_offset;
4938 if (count > bytes_left_in_page) {
4939 left = bytes_left_in_page;
4940 }
4941 }
4942 return left;
4943 }
4944
4945 fn getPageSize(task: MachTask) MachError!usize {
4946 if (task.isValid()) {
4947 var info_count = std.c.TASK_VM_INFO_COUNT;
4948 var vm_info: std.c.task_vm_info_data_t = undefined;
4949 switch (getKernError(std.c.task_info(
4950 task.port,
4951 std.c.TASK_VM_INFO,
4952 @as(std.c.task_info_t, @ptrCast(&vm_info)),
4953 &info_count,
4954 ))) {
4955 .SUCCESS => return @as(usize, @intCast(vm_info.page_size)),
4956 else => {},
4957 }
4958 }
4959 var page_size: std.c.vm_size_t = undefined;
4960 switch (getKernError(std.c._host_page_size(std.c.mach_host_self(), &page_size))) {
4961 .SUCCESS => return page_size,
4962 else => |err| return unexpectedKernError(err),
4963 }
4964 }
4965
4966 pub fn basicTaskInfo(task: MachTask) MachError!std.c.mach_task_basic_info {
4967 var info: std.c.mach_task_basic_info = undefined;
4968 var count = std.c.MACH_TASK_BASIC_INFO_COUNT;
4969 switch (getKernError(std.c.task_info(
4970 task.port,
4971 std.c.MACH_TASK_BASIC_INFO,
4972 @as(std.c.task_info_t, @ptrCast(&info)),
4973 &count,
4974 ))) {
4975 .SUCCESS => return info,
4976 else => |err| return unexpectedKernError(err),
4977 }
4978 }
4979
4980 pub fn @"resume"(task: MachTask) MachError!void {
4981 switch (getKernError(std.c.task_resume(task.port))) {
4982 .SUCCESS => {},
4983 else => |err| return unexpectedKernError(err),
4984 }
4985 }
4986
4987 pub fn @"suspend"(task: MachTask) MachError!void {
4988 switch (getKernError(std.c.task_suspend(task.port))) {
4989 .SUCCESS => {},
4990 else => |err| return unexpectedKernError(err),
4991 }
4992 }
4993
    /// Slice of thread ports returned by `getThreads`. The backing memory is
    /// allocated by the kernel into the *calling* task's address space and
    /// must be released via `deinit`.
    const ThreadList = struct {
        buf: []MachThread,

        /// Deallocates the kernel-provided port array from our own task.
        pub fn deinit(list: ThreadList) void {
            const self_task = machTaskForSelf();
            _ = std.c.vm_deallocate(
                self_task.port,
                @intFromPtr(list.buf.ptr),
                @as(std.c.vm_size_t, @intCast(list.buf.len * @sizeOf(std.c.mach_port_t))),
            );
        }
    };
5006
5007 pub fn getThreads(task: MachTask) MachError!ThreadList {
5008 var thread_list: std.c.mach_port_array_t = undefined;
5009 var thread_count: std.c.mach_msg_type_number_t = undefined;
5010 switch (getKernError(std.c.task_threads(task.port, &thread_list, &thread_count))) {
5011 .SUCCESS => return ThreadList{ .buf = @as([*]MachThread, @ptrCast(thread_list))[0..thread_count] },
5012 else => |err| return unexpectedKernError(err),
5013 }
5014 }
5015};
5016
/// Thin wrapper around a Mach thread port.
pub const MachThread = extern struct {
    port: std.c.mach_port_t,

    /// A thread handle is valid iff its port is not THREAD_NULL.
    pub fn isValid(thread: MachThread) bool {
        return thread.port != std.c.THREAD_NULL;
    }

    /// Fetches THREAD_BASIC_INFO (run state, CPU usage, etc.) for this thread.
    pub fn getBasicInfo(thread: MachThread) MachError!std.c.thread_basic_info {
        var count = std.c.THREAD_BASIC_INFO_COUNT;
        var info: std.c.thread_basic_info = undefined;
        const rc = getKernError(std.c.thread_info(
            thread.port,
            std.c.THREAD_BASIC_INFO,
            @as(std.c.thread_info_t, @ptrCast(&info)),
            &count,
        ));
        return switch (rc) {
            .SUCCESS => info,
            else => |err| unexpectedKernError(err),
        };
    }

    /// Fetches THREAD_IDENTIFIER_INFO (unique thread id and handle) for this thread.
    pub fn getIdentifierInfo(thread: MachThread) MachError!std.c.thread_identifier_info {
        var count = std.c.THREAD_IDENTIFIER_INFO_COUNT;
        var info: std.c.thread_identifier_info = undefined;
        const rc = getKernError(std.c.thread_info(
            thread.port,
            std.c.THREAD_IDENTIFIER_INFO,
            @as(std.c.thread_info_t, @ptrCast(&info)),
            &count,
        ));
        return switch (rc) {
            .SUCCESS => info,
            else => |err| unexpectedKernError(err),
        };
    }
};
5052
/// Obtains the task port for the process with the given pid. Requires
/// appropriate entitlements/privileges; otherwise fails with PermissionDenied.
pub fn machTaskForPid(pid: std.c.pid_t) MachError!MachTask {
    var port: std.c.mach_port_name_t = undefined;
    const rc = getKernError(std.c.task_for_pid(std.c.mach_task_self(), pid, &port));
    return switch (rc) {
        .SUCCESS => .{ .port = port },
        .FAILURE => error.PermissionDenied,
        else => |err| unexpectedKernError(err),
    };
}
5062
/// Returns a task handle for the calling process.
pub fn machTaskForSelf() MachTask {
    return .{ .port = std.c.mach_task_self() };
}
5066
/// Converts a raw `kern_return_t` into the (non-exhaustive) `KernE` enum.
pub fn getKernError(err: std.c.kern_return_t) KernE {
    const wide: usize = @intCast(err);
    const raw: u32 = @truncate(wide);
    return @enumFromInt(raw);
}
5070
/// Maps an unhandled kernel return code to `error.Unexpected`, dumping a
/// stack trace first when error tracing is enabled.
pub fn unexpectedKernError(err: KernE) std.posix.UnexpectedError {
    if (std.posix.unexpected_error_tracing) {
        const code = @intFromEnum(err);
        std.debug.print("unexpected error: {d}\n", .{code});
        std.debug.dumpCurrentStackTrace(.{});
    }
    return error.Unexpected;
}
5078
/// Kernel return values
pub const KernE = enum(u32) {
    /// The operation completed successfully.
    SUCCESS = 0,
    /// Specified address is not currently valid
    INVALID_ADDRESS = 1,
    /// Specified memory is valid, but does not permit the
    /// required forms of access.
    PROTECTION_FAILURE = 2,
    /// The address range specified is already in use, or
    /// no address range of the size specified could be
    /// found.
    NO_SPACE = 3,
    /// The function requested was not applicable to this
    /// type of argument, or an argument is invalid
    INVALID_ARGUMENT = 4,
    /// The function could not be performed. A catch-all.
    FAILURE = 5,
    /// A system resource could not be allocated to fulfill
    /// this request. This failure may not be permanent.
    RESOURCE_SHORTAGE = 6,
    /// The task in question does not hold receive rights
    /// for the port argument.
    NOT_RECEIVER = 7,
    /// Bogus access restriction.
    NO_ACCESS = 8,
    /// During a page fault, the target address refers to a
    /// memory object that has been destroyed. This
    /// failure is permanent.
    MEMORY_FAILURE = 9,
    /// During a page fault, the memory object indicated
    /// that the data could not be returned. This failure
    /// may be temporary; future attempts to access this
    /// same data may succeed, as defined by the memory
    /// object.
    MEMORY_ERROR = 10,
    /// The receive right is already a member of the portset.
    ALREADY_IN_SET = 11,
    /// The receive right is not a member of a port set.
    NOT_IN_SET = 12,
    /// The name already denotes a right in the task.
    NAME_EXISTS = 13,
    /// The operation was aborted. Ipc code will
    /// catch this and reflect it as a message error.
    ABORTED = 14,
    /// The name doesn't denote a right in the task.
    INVALID_NAME = 15,
    /// Target task isn't an active task.
    INVALID_TASK = 16,
    /// The name denotes a right, but not an appropriate right.
    INVALID_RIGHT = 17,
    /// A blatant range error.
    INVALID_VALUE = 18,
    /// Operation would overflow limit on user-references.
    UREFS_OVERFLOW = 19,
    /// The supplied (port) capability is improper.
    INVALID_CAPABILITY = 20,
    /// The task already has send or receive rights
    /// for the port under another name.
    RIGHT_EXISTS = 21,
    /// Target host isn't actually a host.
    INVALID_HOST = 22,
    /// An attempt was made to supply "precious" data
    /// for memory that is already present in a
    /// memory object.
    MEMORY_PRESENT = 23,
    /// A page was requested of a memory manager via
    /// memory_object_data_request for an object using
    /// a MEMORY_OBJECT_COPY_CALL strategy, with the
    /// VM_PROT_WANTS_COPY flag being used to specify
    /// that the page desired is for a copy of the
    /// object, and the memory manager has detected
    /// the page was pushed into a copy of the object
    /// while the kernel was walking the shadow chain
    /// from the copy to the object. This error code
    /// is delivered via memory_object_data_error
    /// and is handled by the kernel (it forces the
    /// kernel to restart the fault). It will not be
    /// seen by users.
    MEMORY_DATA_MOVED = 24,
    /// A strategic copy was attempted of an object
    /// upon which a quicker copy is now possible.
    /// The caller should retry the copy using
    /// vm_object_copy_quickly. This error code
    /// is seen only by the kernel.
    MEMORY_RESTART_COPY = 25,
    /// An argument applied to assert processor set privilege
    /// was not a processor set control port.
    INVALID_PROCESSOR_SET = 26,
    /// The specified scheduling attributes exceed the thread's
    /// limits.
    POLICY_LIMIT = 27,
    /// The specified scheduling policy is not currently
    /// enabled for the processor set.
    INVALID_POLICY = 28,
    /// The external memory manager failed to initialize the
    /// memory object.
    INVALID_OBJECT = 29,
    /// A thread is attempting to wait for an event for which
    /// there is already a waiting thread.
    ALREADY_WAITING = 30,
    /// An attempt was made to destroy the default processor
    /// set.
    DEFAULT_SET = 31,
    /// An attempt was made to fetch an exception port that is
    /// protected, or to abort a thread while processing a
    /// protected exception.
    EXCEPTION_PROTECTED = 32,
    /// A ledger was required but not supplied.
    INVALID_LEDGER = 33,
    /// The port was not a memory cache control port.
    INVALID_MEMORY_CONTROL = 34,
    /// An argument supplied to assert security privilege
    /// was not a host security port.
    INVALID_SECURITY = 35,
    /// thread_depress_abort was called on a thread which
    /// was not currently depressed.
    NOT_DEPRESSED = 36,
    /// Object has been terminated and is no longer available
    TERMINATED = 37,
    /// Lock set has been destroyed and is no longer available.
    LOCK_SET_DESTROYED = 38,
    /// The thread holding the lock terminated before releasing
    /// the lock
    LOCK_UNSTABLE = 39,
    /// The lock is already owned by another thread
    LOCK_OWNED = 40,
    /// The lock is already owned by the calling thread
    LOCK_OWNED_SELF = 41,
    /// Semaphore has been destroyed and is no longer available.
    SEMAPHORE_DESTROYED = 42,
    /// Return from RPC indicating the target server was
    /// terminated before it successfully replied
    RPC_SERVER_TERMINATED = 43,
    /// Terminate an orphaned activation.
    RPC_TERMINATE_ORPHAN = 44,
    /// Allow an orphaned activation to continue executing.
    RPC_CONTINUE_ORPHAN = 45,
    /// Empty thread activation (No thread linked to it)
    NOT_SUPPORTED = 46,
    /// Remote node down or inaccessible.
    NODE_DOWN = 47,
    /// A signalled thread was not actually waiting.
    NOT_WAITING = 48,
    /// Some thread-oriented operation (semaphore_wait) timed out
    OPERATION_TIMED_OUT = 49,
    /// During a page fault, indicates that the page was rejected
    /// as a result of a signature check.
    CODESIGN_ERROR = 50,
    /// The requested property cannot be changed at this time.
    POLICY_STATIC = 51,
    /// The provided buffer is of insufficient size for the requested data.
    INSUFFICIENT_BUFFER_SIZE = 52,
    /// Denied by security policy
    DENIED = 53,
    /// The KC on which the function is operating is missing
    MISSING_KC = 54,
    /// The KC on which the function is operating is invalid
    INVALID_KC = 55,
    /// A search or query operation did not return a result
    NOT_FOUND = 56,
    /// Non-exhaustive: newer kernels may return codes not listed above.
    _,
};
5241
/// Assigns addresses to all atoms in output section `sect_id`, partitioning
/// them into groups that stay within direct-branch range of one another, and
/// appends one range-extension thunk after each group for any callsites that
/// still cannot reach their targets directly.
fn createThunks(macho_file: *MachO, sect_id: u8) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const gpa = macho_file.base.comp.gpa;
    const slice = macho_file.sections.slice();
    const header = &slice.items(.header)[sect_id];
    const thnks = &slice.items(.thunks)[sect_id];
    const atoms = slice.items(.atoms)[sect_id].items;
    assert(atoms.len > 0);

    // Reset every atom's value to the sentinel -1, meaning "not yet placed";
    // isReachable treats such targets as unreachable.
    for (atoms) |ref| {
        ref.getAtom(macho_file).?.value = @bitCast(@as(i64, -1));
    }

    var i: usize = 0;
    while (i < atoms.len) {
        const start = i;
        const start_atom = atoms[start].getAtom(macho_file).?;
        assert(start_atom.isAlive());
        start_atom.value = advanceSection(header, start_atom.size, start_atom.alignment);
        i += 1;

        // Grow the group while its span stays under the safe branch range
        // (max_allowed_distance leaves a margin below the true +/-128MiB).
        while (i < atoms.len and
            header.size - start_atom.value < max_allowed_distance) : (i += 1)
        {
            const atom = atoms[i].getAtom(macho_file).?;
            assert(atom.isAlive());
            atom.value = advanceSection(header, atom.size, atom.alignment);
        }

        // Insert a thunk at the group end
        const thunk_index = try macho_file.addThunk();
        const thunk = macho_file.getThunk(thunk_index);
        thunk.out_n_sect = sect_id;
        try thnks.append(gpa, thunk_index);

        // Scan relocs in the group and create trampolines for any unreachable callsite
        try scanThunkRelocs(thunk_index, gpa, atoms[start..i], macho_file);
        thunk.value = advanceSection(header, thunk.size(), .@"4");

        log.debug("thunk({d}) : {f}", .{ thunk_index, thunk.fmt(macho_file) });
    }
}
5286
/// Reserves `adv_size` bytes at the end of `sect`, first padding the section
/// size up to `alignment`. Raises the section's alignment if needed and
/// returns the aligned offset at which the reservation starts.
fn advanceSection(sect: *macho.section_64, adv_size: u64, alignment: Atom.Alignment) u64 {
    const aligned_off = alignment.forward(sect.size);
    sect.size = aligned_off + adv_size;
    sect.@"align" = @max(sect.@"align", alignment.toLog2Units());
    return aligned_off;
}
5294
/// Collects into the thunk `thunk_index` the set of branch targets within
/// `atoms` that are not directly reachable and therefore need a trampoline,
/// and records on each atom which thunk services it.
fn scanThunkRelocs(thunk_index: Thunk.Index, gpa: Allocator, atoms: []const MachO.Ref, macho_file: *MachO) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const thunk = macho_file.getThunk(thunk_index);

    for (atoms) |ref| {
        const atom = ref.getAtom(macho_file).?;
        log.debug("atom({d}) {s}", .{ atom.atom_index, atom.getName(macho_file) });
        for (atom.getRelocs(macho_file)) |rel| {
            // Only branch relocations can require a range-extension thunk.
            if (rel.type != .branch) continue;
            if (isReachable(atom, rel, macho_file)) continue;
            // Deduplicated via map keys; value is void.
            try thunk.symbols.put(gpa, rel.getTargetSymbolRef(atom.*, macho_file), {});
        }
        // Remember which thunk services this atom.
        atom.addExtra(.{ .thunk = thunk_index }, macho_file);
    }
}
5312
/// Returns true if the branch relocation `rel` within `atom` can reach its
/// target with a direct branch, i.e. without a stub or extension thunk.
fn isReachable(atom: *const Atom, rel: Relocation, macho_file: *MachO) bool {
    const target = rel.getTargetSymbol(atom.*, macho_file);
    // Calls routed through (objc) stubs are never direct.
    if (target.getSectionFlags().stubs or target.getSectionFlags().objc_stubs) return false;
    // Different output section: cannot prove reachability here.
    if (atom.out_n_sect != target.getOutputSectionIndex(macho_file)) return false;
    const target_atom = target.getAtom(macho_file).?;
    // Sentinel -1 means the target has not been placed yet (see createThunks).
    if (target_atom.value == @as(u64, @bitCast(@as(i64, -1)))) return false;
    const saddr = @as(i64, @intCast(atom.getAddress(macho_file))) + @as(i64, @intCast(rel.offset - atom.off));
    const taddr: i64 = @intCast(rel.getTargetAddress(atom.*, macho_file));
    // Branch displacement must fit a signed 28-bit immediate (26 bits << 2).
    _ = math.cast(i28, taddr + rel.addend - saddr) orelse return false;
    return true;
}
5324
/// Writes `bytes` at `offset` into the output file, converting any I/O error
/// into a reported link diagnostic (`error.LinkFailure`).
pub fn pwriteAll(macho_file: *MachO, bytes: []const u8, offset: u64) error{LinkFailure}!void {
    const diags = &macho_file.base.comp.link_diags;
    macho_file.base.file.?.pwriteAll(bytes, offset) catch |err| {
        return diags.fail("failed to write: {s}", .{@errorName(err)});
    };
}
5332
/// Truncates/extends the output file to `length` bytes, converting any I/O
/// error into a reported link diagnostic (`error.LinkFailure`).
pub fn setEndPos(macho_file: *MachO, length: u64) error{LinkFailure}!void {
    const diags = &macho_file.base.comp.link_diags;
    macho_file.base.file.?.setEndPos(length) catch |err| {
        return diags.fail("failed to set file end pos: {s}", .{@errorName(err)});
    };
}
5340
/// Checked integer cast of `x` to `T`; on overflow, reports a link
/// diagnostic instead of invoking safety-checked UB.
pub fn cast(macho_file: *MachO, comptime T: type, x: anytype) error{LinkFailure}!T {
    if (std.math.cast(T, x)) |value| return value;
    const diags = &macho_file.base.comp.link_diags;
    return diags.fail("encountered {d}, overflowing {d}-bit value", .{ x, @bitSizeOf(T) });
}
5348
/// Computes `1 << x` as a u32 alignment value, reporting a link diagnostic
/// if `x` does not fit a u5 shift amount or the shift overflows.
pub fn alignPow(macho_file: *MachO, x: u32) error{LinkFailure}!u32 {
    const shift = try cast(macho_file, u5, x);
    const result, const overflow = @shlWithOverflow(@as(u32, 1), shift);
    if (overflow != 0) {
        const diags = &macho_file.base.comp.link_diags;
        return diags.fail("alignment overflow", .{});
    }
    return result;
}
5358
/// Branch instruction has 26 bits immediate but is 4 byte aligned.
const jump_bits = @bitSizeOf(i28);
/// Maximum reach of a direct branch: half the signed 28-bit range (128 MiB).
const max_distance = (1 << (jump_bits - 1));

/// A branch will need an extender if its target is larger than
/// `2^(jump_bits - 1) - margin` where margin is some arbitrary number.
/// mold uses 5MiB margin, while ld64 uses 4MiB margin. We will follow mold
/// and assume margin to be 5MiB.
const max_allowed_distance = max_distance - 0x500_000;
5368
5369const MachO = @This();
5370
5371const std = @import("std");
5372const build_options = @import("build_options");
5373const builtin = @import("builtin");
5374const assert = std.debug.assert;
5375const fs = std.fs;
5376const log = std.log.scoped(.link);
5377const state_log = std.log.scoped(.link_state);
5378const macho = std.macho;
5379const math = std.math;
5380const mem = std.mem;
5381const meta = std.meta;
5382const Writer = std.Io.Writer;
5383
5384const aarch64 = codegen.aarch64.encoding;
5385const bind = @import("MachO/dyld_info/bind.zig");
5386const calcUuid = @import("MachO/uuid.zig").calcUuid;
5387const codegen = @import("../codegen.zig");
5388const dead_strip = @import("MachO/dead_strip.zig");
5389const eh_frame = @import("MachO/eh_frame.zig");
5390const fat = @import("MachO/fat.zig");
5391const link = @import("../link.zig");
5392const load_commands = @import("MachO/load_commands.zig");
5393const relocatable = @import("MachO/relocatable.zig");
5394const tapi = @import("tapi.zig");
5395const target_util = @import("../target.zig");
5396const trace = @import("../tracy.zig").trace;
5397const synthetic = @import("MachO/synthetic.zig");
5398
5399const Alignment = Atom.Alignment;
5400const Allocator = mem.Allocator;
5401const Archive = @import("MachO/Archive.zig");
5402const AtomicBool = std.atomic.Value(bool);
5403const Bind = bind.Bind;
5404const Cache = std.Build.Cache;
5405const CodeSignature = @import("MachO/CodeSignature.zig");
5406const Compilation = @import("../Compilation.zig");
5407const DataInCode = synthetic.DataInCode;
5408const Directory = Cache.Directory;
5409const Dylib = @import("MachO/Dylib.zig");
5410const ExportTrie = @import("MachO/dyld_info/Trie.zig");
5411const Path = Cache.Path;
5412const File = @import("MachO/file.zig").File;
5413const GotSection = synthetic.GotSection;
5414const Hash = std.hash.Wyhash;
5415const Indsymtab = synthetic.Indsymtab;
5416const InternalObject = @import("MachO/InternalObject.zig");
5417const ObjcStubsSection = synthetic.ObjcStubsSection;
5418const Object = @import("MachO/Object.zig");
5419const LazyBind = bind.LazyBind;
5420const LaSymbolPtrSection = synthetic.LaSymbolPtrSection;
5421const Md5 = std.crypto.hash.Md5;
5422const Zcu = @import("../Zcu.zig");
5423const InternPool = @import("../InternPool.zig");
5424const Rebase = @import("MachO/dyld_info/Rebase.zig");
5425const StringTable = @import("StringTable.zig");
5426const StubsSection = synthetic.StubsSection;
5427const StubsHelperSection = synthetic.StubsHelperSection;
5428const Symbol = @import("MachO/Symbol.zig");
5429const Thunk = @import("MachO/Thunk.zig");
5430const TlvPtrSection = synthetic.TlvPtrSection;
5431const Value = @import("../Value.zig");
5432const UnwindInfo = @import("MachO/UnwindInfo.zig");
5433const WeakBind = bind.WeakBind;
5434const ZigObject = @import("MachO/ZigObject.zig");
5435const dev = @import("../dev.zig");