//! Self-hosted debug-info support for loaded Mach-O images (macOS).
/// Protects `modules` and the lazily-populated per-module state; locked by
/// `findModule` and released by callers via `defer si.mutex.unlock()`.
mutex: std.Thread.Mutex,
/// Cache of loaded modules, keyed by `__TEXT` base address.
/// Accessed through `Module.Adapter`.
modules: std.ArrayHashMapUnmanaged(Module, void, Module.Context, false),

/// Empty initial state; modules are discovered lazily by `findModule`.
pub const init: SelfInfo = .{
    .mutex = .{},
    .modules = .empty,
};
/// Frees all lazily-loaded per-module state and the module cache itself.
/// NOTE(review): does not take `mutex` — assumes no concurrent users remain;
/// confirm with callers.
pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
    for (si.modules.keys()) |*module| {
        unwind: {
            // `module.unwind` is `?(Error!Unwind)`: `orelse` skips the never-loaded
            // case, then `catch` skips the failed-load case.
            const u = &(module.unwind orelse break :unwind catch break :unwind);
            if (u.dwarf) |*dwarf| dwarf.deinit(gpa);
        }
        file: {
            // Same double-unwrap pattern for the lazily-loaded on-disk file.
            const f = &(module.file orelse break :file catch break :file);
            f.deinit(gpa);
        }
    }
    si.modules.deinit(gpa);
}
22
/// Looks up symbol information (name, compile unit, source location) for
/// `address`, which must lie within some loaded module. Degrades gracefully:
/// when DWARF info is unavailable, the fields that cannot be resolved are null.
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
    _ = io;
    const module = try si.findModule(gpa, address);
    defer si.mutex.unlock();

    const file = try module.getFile(gpa);

    // This is not necessarily the same as the vmaddr_slide that dyld would report. This is
    // because the segments in the file on disk might differ from the ones in memory. Normally
    // we wouldn't necessarily expect that to work, but /usr/lib/dyld is incredibly annoying:
    // it exists on disk (necessarily, because the kernel needs to load it!), but is also in
    // the dyld cache (dyld actually restart itself from cache after loading it), and the two
    // versions have (very) different segment base addresses. It's sort of like a large slide
    // has been applied to all addresses in memory. For an optimal experience, we consider the
    // on-disk vmaddr instead of the in-memory one.
    const vaddr_offset = module.text_base - file.text_vmaddr;

    // `address` translated into the on-disk file's address space.
    const vaddr = address - vaddr_offset;

    const ofile_dwarf, const ofile_vaddr = file.getDwarfForAddress(gpa, vaddr) catch
        return symbolNameOnly(file, vaddr);

    const compile_unit = ofile_dwarf.findCompileUnit(native_endian, ofile_vaddr) catch
        return symbolNameOnly(file, vaddr);

    return .{
        .name = ofile_dwarf.getSymbolName(ofile_vaddr) orelse
            try file.lookupSymbolName(vaddr),
        .compile_unit_name = compile_unit.die.getAttrString(
            ofile_dwarf,
            native_endian,
            std.dwarf.AT.name,
            ofile_dwarf.section(.debug_str),
            compile_unit,
        ) catch |err| switch (err) {
            error.MissingDebugInfo, error.InvalidDebugInfo => null,
        },
        .source_location = ofile_dwarf.getLineNumberInfo(
            gpa,
            native_endian,
            compile_unit,
            ofile_vaddr,
        ) catch null,
    };
}

/// Fallback used by `getSymbol` when DWARF info cannot be consulted:
/// return at least the symbol name if available, with no compile unit
/// or source location.
fn symbolNameOnly(file: *MachOFile, vaddr: usize) Error!std.debug.Symbol {
    return .{
        .name = try file.lookupSymbolName(vaddr),
        .compile_unit_name = null,
        .source_location = null,
    };
}
/// Returns the name (image path, as reported by `dladdr` in `findModule`) of
/// the module containing `address`. The slice is owned by the module cache.
pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
    const module = try si.findModule(gpa, address);
    defer si.mutex.unlock();
    return module.name;
}
/// Returns the slide (runtime load address minus linked vmaddr) of the module
/// containing `address`, read from the in-memory `__TEXT` segment load command.
pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
    const module = try si.findModule(gpa, address);
    defer si.mutex.unlock();
    // The Mach-O header of a loaded image sits at the start of its `__TEXT` segment.
    const header: *std.macho.mach_header_64 = @ptrFromInt(module.text_base);
    const raw_macho: [*]u8 = @ptrCast(header);
    // The loader already validated these load commands, hence `catch unreachable`.
    var it = macho.LoadCommandIterator.init(header, raw_macho[@sizeOf(macho.mach_header_64)..][0..header.sizeofcmds]) catch unreachable;
    const text_vmaddr = while (it.next() catch unreachable) |load_cmd| {
        if (load_cmd.hdr.cmd != .SEGMENT_64) continue;
        const segment_cmd = load_cmd.cast(macho.segment_command_64).?;
        if (!mem.eql(u8, segment_cmd.segName(), "__TEXT")) continue;
        break segment_cmd.vmaddr;
    } else unreachable; // assumes every loaded image has a `__TEXT` segment
    return module.text_base - text_vmaddr;
}
99
/// This implementation supports stack unwinding (see `unwindFrame`).
pub const can_unwind: bool = true;
/// Per-unwind state threaded through successive `unwindFrame` calls.
pub const UnwindContext = std.debug.Dwarf.SelfUnwinder;
/// Unwind a frame using MachO compact unwind info (from `__unwind_info`).
/// If the compact encoding can't encode a way to unwind a frame, it will
/// defer unwinding to DWARF, in which case `__eh_frame` will be used if available.
pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
    // `unwindFrameInner` has an inferred error set; collapse it to `Error` here.
    return unwindFrameInner(si, gpa, context) catch |inner_err| switch (inner_err) {
        // Malformed unwind data of any flavor is reported as invalid debug info.
        error.Overflow,
        error.EndOfStream,
        error.StreamTooLong,
        error.InvalidOpcode,
        error.InvalidOperation,
        error.InvalidOperand,
        error.InvalidRegister,
        error.IncompatibleRegisterSize,
        => return error.InvalidDebugInfo,

        // Constructs we cannot handle map to "unsupported".
        error.UnsupportedRegister,
        error.UnsupportedAddrSize,
        error.UnimplementedUserOpcode,
        => return error.UnsupportedDebugInfo,

        // Already members of `Error`; forward unchanged.
        error.InvalidDebugInfo,
        error.MissingDebugInfo,
        error.UnsupportedDebugInfo,
        error.ReadFailed,
        error.OutOfMemory,
        error.Unexpected,
        error.Canceled,
        => |e| return e,
    };
}
/// Implementation of `unwindFrame` with an inferred error set; `unwindFrame`
/// narrows the result to `Error`. Walks `__unwind_info` (first-level index,
/// then a regular or compressed second-level page) to find the compact
/// encoding for `context.pc`, then applies it to `context.cpu_state`.
fn unwindFrameInner(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) !usize {
    const module = try si.findModule(gpa, context.pc);
    defer si.mutex.unlock();

    const unwind: *Module.Unwind = try module.getUnwindInfo(gpa);

    // DWARF register numbers for the instruction, frame, and stack pointers on
    // the native architecture.
    const ip_reg_num = comptime Dwarf.ipRegNum(builtin.target.cpu.arch).?;
    const fp_reg_num = comptime Dwarf.fpRegNum(builtin.target.cpu.arch);
    const sp_reg_num = comptime Dwarf.spRegNum(builtin.target.cpu.arch);

    const unwind_info = unwind.unwind_info orelse return error.MissingDebugInfo;
    if (unwind_info.len < @sizeOf(macho.unwind_info_section_header)) return error.InvalidDebugInfo;
    const header: *align(1) const macho.unwind_info_section_header = @ptrCast(unwind_info);

    const index_byte_count = header.indexCount * @sizeOf(macho.unwind_info_section_header_index_entry);
    if (unwind_info.len < header.indexSectionOffset + index_byte_count) return error.InvalidDebugInfo;
    const indices: []align(1) const macho.unwind_info_section_header_index_entry = @ptrCast(unwind_info[header.indexSectionOffset..][0..index_byte_count]);
    if (indices.len == 0) return error.MissingDebugInfo;

    // offset of the PC into the `__TEXT` segment
    const pc_text_offset = context.pc - module.text_base;

    // First-level lookup: binary search for the greatest index entry whose
    // `functionOffset` is <= `pc_text_offset`.
    const start_offset: u32, const first_level_offset: u32 = index: {
        var left: usize = 0;
        var len: usize = indices.len;
        while (len > 1) {
            const mid = left + len / 2;
            if (pc_text_offset < indices[mid].functionOffset) {
                len /= 2;
            } else {
                left = mid;
                len -= len / 2;
            }
        }
        break :index .{ indices[left].secondLevelPagesSectionOffset, indices[left].functionOffset };
    };
    // An offset of 0 is a sentinel indicating a range does not have unwind info.
    if (start_offset == 0) return error.MissingDebugInfo;

    const common_encodings_byte_count = header.commonEncodingsArrayCount * @sizeOf(macho.compact_unwind_encoding_t);
    if (unwind_info.len < header.commonEncodingsArraySectionOffset + common_encodings_byte_count) return error.InvalidDebugInfo;
    const common_encodings: []align(1) const macho.compact_unwind_encoding_t = @ptrCast(
        unwind_info[header.commonEncodingsArraySectionOffset..][0..common_encodings_byte_count],
    );

    if (unwind_info.len < start_offset + @sizeOf(macho.UNWIND_SECOND_LEVEL)) return error.InvalidDebugInfo;
    const kind: *align(1) const macho.UNWIND_SECOND_LEVEL = @ptrCast(unwind_info[start_offset..]);

    // Second-level lookup: find this PC's entry in either a regular or a
    // compressed page, yielding the function start and its raw compact encoding.
    const entry: struct {
        function_offset: usize,
        raw_encoding: u32,
    } = switch (kind.*) {
        .REGULAR => entry: {
            if (unwind_info.len < start_offset + @sizeOf(macho.unwind_info_regular_second_level_page_header)) return error.InvalidDebugInfo;
            const page_header: *align(1) const macho.unwind_info_regular_second_level_page_header = @ptrCast(unwind_info[start_offset..]);

            const entries_byte_count = page_header.entryCount * @sizeOf(macho.unwind_info_regular_second_level_entry);
            // NOTE(review): this bound does not include `entryPageOffset`, which the
            // slice below adds to the start — confirm the check should cover it too.
            if (unwind_info.len < start_offset + entries_byte_count) return error.InvalidDebugInfo;
            const entries: []align(1) const macho.unwind_info_regular_second_level_entry = @ptrCast(
                unwind_info[start_offset + page_header.entryPageOffset ..][0..entries_byte_count],
            );
            if (entries.len == 0) return error.InvalidDebugInfo;

            // Binary search for the greatest `functionOffset` <= `pc_text_offset`.
            var left: usize = 0;
            var len: usize = entries.len;
            while (len > 1) {
                const mid = left + len / 2;
                if (pc_text_offset < entries[mid].functionOffset) {
                    len /= 2;
                } else {
                    left = mid;
                    len -= len / 2;
                }
            }
            break :entry .{
                .function_offset = entries[left].functionOffset,
                .raw_encoding = entries[left].encoding,
            };
        },
        .COMPRESSED => entry: {
            if (unwind_info.len < start_offset + @sizeOf(macho.unwind_info_compressed_second_level_page_header)) return error.InvalidDebugInfo;
            const page_header: *align(1) const macho.unwind_info_compressed_second_level_page_header = @ptrCast(unwind_info[start_offset..]);

            const entries_byte_count = page_header.entryCount * @sizeOf(macho.UnwindInfoCompressedEntry);
            if (unwind_info.len < start_offset + entries_byte_count) return error.InvalidDebugInfo;
            const entries: []align(1) const macho.UnwindInfoCompressedEntry = @ptrCast(
                unwind_info[start_offset + page_header.entryPageOffset ..][0..entries_byte_count],
            );
            if (entries.len == 0) return error.InvalidDebugInfo;

            // Compressed `funcOffset`s are relative to the first-level entry's offset.
            var left: usize = 0;
            var len: usize = entries.len;
            while (len > 1) {
                const mid = left + len / 2;
                if (pc_text_offset < first_level_offset + entries[mid].funcOffset) {
                    len /= 2;
                } else {
                    left = mid;
                    len -= len / 2;
                }
            }
            const entry = entries[left];

            const function_offset = first_level_offset + entry.funcOffset;
            // Encoding indices below the common count refer to the shared table in
            // the section header; the rest index this page's local table.
            if (entry.encodingIndex < common_encodings.len) {
                break :entry .{
                    .function_offset = function_offset,
                    .raw_encoding = common_encodings[entry.encodingIndex],
                };
            }

            const local_index = entry.encodingIndex - common_encodings.len;
            const local_encodings_byte_count = page_header.encodingsCount * @sizeOf(macho.compact_unwind_encoding_t);
            if (unwind_info.len < start_offset + page_header.encodingsPageOffset + local_encodings_byte_count) return error.InvalidDebugInfo;
            const local_encodings: []align(1) const macho.compact_unwind_encoding_t = @ptrCast(
                unwind_info[start_offset + page_header.encodingsPageOffset ..][0..local_encodings_byte_count],
            );
            if (local_index >= local_encodings.len) return error.InvalidDebugInfo;
            break :entry .{
                .function_offset = function_offset,
                .raw_encoding = local_encodings[local_index],
            };
        },
        else => return error.InvalidDebugInfo,
    };

    // A zero encoding means this function has no unwind info.
    if (entry.raw_encoding == 0) return error.MissingDebugInfo;

    const encoding: macho.CompactUnwindEncoding = @bitCast(entry.raw_encoding);
    const new_ip = switch (builtin.cpu.arch) {
        .x86_64 => switch (encoding.mode.x86_64) {
            .OLD => return error.UnsupportedDebugInfo,
            // Frame-based: RBP holds the saved frame pointer, with the return
            // address directly above it.
            .RBP_FRAME => ip: {
                const frame = encoding.value.x86_64.frame;

                const fp = (try dwarfRegNative(&context.cpu_state, fp_reg_num)).*;
                const new_sp = fp + 2 * @sizeOf(usize);

                const ip_ptr = fp + @sizeOf(usize);
                const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*;
                const new_fp = @as(*const usize, @ptrFromInt(fp)).*;

                (try dwarfRegNative(&context.cpu_state, fp_reg_num)).* = new_fp;
                (try dwarfRegNative(&context.cpu_state, sp_reg_num)).* = new_sp;
                (try dwarfRegNative(&context.cpu_state, ip_reg_num)).* = new_ip;

                // Restore up to five callee-saved registers spilled below the frame;
                // a register slot of 0 means "nothing saved here".
                const regs: [5]u3 = .{
                    frame.reg0,
                    frame.reg1,
                    frame.reg2,
                    frame.reg3,
                    frame.reg4,
                };
                for (regs, 0..) |reg, i| {
                    if (reg == 0) continue;
                    const addr = fp - frame.frame_offset * @sizeOf(usize) + i * @sizeOf(usize);
                    const reg_number = try Dwarf.compactUnwindToDwarfRegNumber(reg);
                    (try dwarfRegNative(&context.cpu_state, reg_number)).* = @as(*const usize, @ptrFromInt(addr)).*;
                }

                break :ip new_ip;
            },
            .STACK_IMMD,
            .STACK_IND,
            => ip: {
                const frameless = encoding.value.x86_64.frameless;

                const sp = (try dwarfRegNative(&context.cpu_state, sp_reg_num)).*;
                const stack_size: usize = stack_size: {
                    if (encoding.mode.x86_64 == .STACK_IMMD) {
                        break :stack_size @as(usize, frameless.stack.direct.stack_size) * @sizeOf(usize);
                    }
                    // In .STACK_IND, the stack size is inferred from the subq instruction at the beginning of the function.
                    const sub_offset_addr =
                        module.text_base +
                        entry.function_offset +
                        frameless.stack.indirect.sub_offset;
                    // `sub_offset_addr` points to the offset of the literal within the instruction
                    const sub_operand = @as(*align(1) const u32, @ptrFromInt(sub_offset_addr)).*;
                    break :stack_size sub_operand + @sizeOf(usize) * @as(usize, frameless.stack.indirect.stack_adjust);
                };

                // Decode the Lehmer-coded sequence of registers.
                // For a description of the encoding see lib/libc/include/any-macos.13-any/mach-o/compact_unwind_encoding.h

                // Decode the variable-based permutation number into its digits. Each digit represents
                // an index into the list of register numbers that weren't yet used in the sequence at
                // the time the digit was added.
                const reg_count = frameless.stack_reg_count;
                const ip_ptr = ip_ptr: {
                    var digits: [6]u3 = undefined;
                    var accumulator: usize = frameless.stack_reg_permutation;
                    var base: usize = 2;
                    for (0..reg_count) |i| {
                        const div = accumulator / base;
                        digits[digits.len - 1 - i] = @intCast(accumulator - base * div);
                        accumulator = div;
                        base += 1;
                    }

                    // Turn the digits back into register numbers by repeatedly picking
                    // the digit-th not-yet-used entry.
                    var registers: [6]u3 = undefined;
                    var used_indices: [6]bool = @splat(false);
                    for (digits[digits.len - reg_count ..], 0..) |target_unused_index, i| {
                        var unused_count: u8 = 0;
                        const unused_index = for (used_indices, 0..) |used, index| {
                            if (!used) {
                                if (target_unused_index == unused_count) break index;
                                unused_count += 1;
                            }
                        } else unreachable;
                        registers[i] = @intCast(unused_index + 1);
                        used_indices[unused_index] = true;
                    }

                    // The saved registers sit at the top of the stack frame, just
                    // below the return address.
                    var reg_addr = sp + stack_size - @sizeOf(usize) * @as(usize, reg_count + 1);
                    for (0..reg_count) |i| {
                        const reg_number = try Dwarf.compactUnwindToDwarfRegNumber(registers[i]);
                        (try dwarfRegNative(&context.cpu_state, reg_number)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
                        reg_addr += @sizeOf(usize);
                    }

                    break :ip_ptr reg_addr;
                };

                const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*;
                const new_sp = ip_ptr + @sizeOf(usize);

                (try dwarfRegNative(&context.cpu_state, sp_reg_num)).* = new_sp;
                (try dwarfRegNative(&context.cpu_state, ip_reg_num)).* = new_ip;

                break :ip new_ip;
            },
            // The compact encoding just points into DWARF CFI; delegate to `__eh_frame`.
            .DWARF => {
                const dwarf = &(unwind.dwarf orelse return error.MissingDebugInfo);
                const rules = try context.computeRules(gpa, dwarf, unwind.vmaddr_slide, encoding.value.x86_64.dwarf);
                return context.next(gpa, &rules);
            },
        },
        .aarch64 => switch (encoding.mode.arm64) {
            .OLD => return error.UnsupportedDebugInfo,
            // Frameless leaf function: the return address is still in x30 (lr).
            .FRAMELESS => ip: {
                const sp = (try dwarfRegNative(&context.cpu_state, sp_reg_num)).*;
                const new_sp = sp + encoding.value.arm64.frameless.stack_size * 16;
                const new_ip = (try dwarfRegNative(&context.cpu_state, 30)).*;
                (try dwarfRegNative(&context.cpu_state, sp_reg_num)).* = new_sp;
                break :ip new_ip;
            },
            .DWARF => {
                const dwarf = &(unwind.dwarf orelse return error.MissingDebugInfo);
                const rules = try context.computeRules(gpa, dwarf, unwind.vmaddr_slide, encoding.value.arm64.dwarf);
                return context.next(gpa, &rules);
            },
            // Standard frame: fp/lr pair at the frame pointer, x19..x28 pairs below.
            .FRAME => ip: {
                const frame = encoding.value.arm64.frame;

                const fp = (try dwarfRegNative(&context.cpu_state, fp_reg_num)).*;
                const ip_ptr = fp + @sizeOf(usize);

                var reg_addr = fp - @sizeOf(usize);
                inline for (@typeInfo(@TypeOf(frame.x_reg_pairs)).@"struct".fields, 0..) |field, i| {
                    if (@field(frame.x_reg_pairs, field.name) != 0) {
                        (try dwarfRegNative(&context.cpu_state, 19 + i)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
                        reg_addr += @sizeOf(usize);
                        (try dwarfRegNative(&context.cpu_state, 20 + i)).* = @as(*const usize, @ptrFromInt(reg_addr)).*;
                        reg_addr += @sizeOf(usize);
                    }
                }

                // We intentionally skip restoring `frame.d_reg_pairs`; we know we don't support
                // vector registers in the AArch64 `cpu_context` anyway, so there's no reason to
                // fail a legitimate unwind just because we're asked to restore the registers here.
                // If some weird/broken unwind info tells us to read them later, we will fail then.
                reg_addr += 16 * @as(usize, @popCount(@as(u4, @bitCast(frame.d_reg_pairs))));

                const new_ip = @as(*const usize, @ptrFromInt(ip_ptr)).*;
                const new_fp = @as(*const usize, @ptrFromInt(fp)).*;

                (try dwarfRegNative(&context.cpu_state, fp_reg_num)).* = new_fp;
                (try dwarfRegNative(&context.cpu_state, ip_reg_num)).* = new_ip;

                break :ip new_ip;
            },
        },
        else => comptime unreachable, // unimplemented
    };

    const ret_addr = std.debug.stripInstructionPtrAuthCode(new_ip);

    // Like `Dwarf.SelfUnwinder.next`, adjust our next lookup pc in case the `call` was this
    // function's last instruction making `ret_addr` one byte past its end.
    context.pc = ret_addr -| 1;

    return ret_addr;
}
426
/// Locates (or lazily inserts) the cached `Module` containing `address`,
/// using `dladdr` to identify the owning image.
/// Acquires the mutex on success.
fn findModule(si: *SelfInfo, gpa: Allocator, address: usize) Error!*Module {
    var info: std.c.dl_info = undefined;
    // `dladdr` returns 0 when no loaded image contains `address`.
    if (std.c.dladdr(@ptrFromInt(address), &info) == 0) {
        return error.MissingDebugInfo;
    }
    si.mutex.lock();
    errdefer si.mutex.unlock();
    // The cache key is the image's base address (`info.fbase`).
    const gop = try si.modules.getOrPutAdapted(gpa, @intFromPtr(info.fbase), Module.Adapter{});
    // No failing operation may follow, or a new map entry could be left undefined.
    errdefer comptime unreachable;
    if (!gop.found_existing) {
        gop.key_ptr.* = .{
            .text_base = @intFromPtr(info.fbase),
            .name = std.mem.span(info.fname),
            .unwind = null,
            .file = null,
        };
    }
    return gop.key_ptr;
}
447
/// A single loaded Mach-O image, cached in `SelfInfo.modules` and identified
/// by `text_base`.
const Module = struct {
    /// Runtime address of the image's Mach-O header (start of `__TEXT`).
    text_base: usize,
    /// Image path as reported by `dladdr`; not owned by this struct.
    name: []const u8,
    /// `null` until `getUnwindInfo` is first called; then caches the (possibly
    /// failed) result of `loadUnwindInfo`.
    unwind: ?(Error!Unwind),
    /// `null` until `getFile` is first called; then caches the (possibly
    /// failed) result of `MachOFile.load`.
    file: ?(Error!MachOFile),

    /// Adapted hash-map context permitting lookup by a bare `text_base` key.
    const Adapter = struct {
        pub fn hash(_: Adapter, text_base: usize) u32 {
            return @truncate(std.hash.int(text_base));
        }
        pub fn eql(_: Adapter, a_text_base: usize, b_module: Module, b_index: usize) bool {
            _ = b_index;
            return a_text_base == b_module.text_base;
        }
    };
    /// Hash-map context: a `Module`'s identity is its `text_base` alone.
    const Context = struct {
        pub fn hash(_: Context, module: Module) u32 {
            return @truncate(std.hash.int(module.text_base));
        }
        pub fn eql(_: Context, a_module: Module, b_module: Module, b_index: usize) bool {
            _ = b_index;
            return a_module.text_base == b_module.text_base;
        }
    };

    const Unwind = struct {
        /// The slide applied to the `__unwind_info` and `__eh_frame` sections.
        /// So, `unwind_info.ptr` is this many bytes higher than the section's vmaddr.
        vmaddr_slide: u64,
        /// Backed by the in-memory section mapped by the loader.
        unwind_info: ?[]const u8,
        /// Backed by the in-memory `__eh_frame` section mapped by the loader.
        dwarf: ?Dwarf.Unwind,
    };

    /// Returns the cached unwind state, loading it on first use.
    /// A failed load is cached too, so the same error is returned every call.
    fn getUnwindInfo(module: *Module, gpa: Allocator) Error!*Unwind {
        if (module.unwind == null) module.unwind = loadUnwindInfo(module, gpa);
        return if (module.unwind.?) |*unwind| unwind else |err| err;
    }
    /// Locates `__unwind_info`/`__eh_frame` in the loaded image and, when
    /// `__eh_frame` is present, prepares its DWARF CFI (CIE scan only).
    fn loadUnwindInfo(module: *const Module, gpa: Allocator) Error!Unwind {
        // The Mach-O header of a loaded image is the first byte of `__TEXT`.
        const header: *std.macho.mach_header_64 = @ptrFromInt(module.text_base);

        const raw_macho: [*]u8 = @ptrCast(header);
        // The loader already validated the load commands, hence `catch unreachable`.
        var it = macho.LoadCommandIterator.init(header, raw_macho[@sizeOf(macho.mach_header_64)..][0..header.sizeofcmds]) catch unreachable;
        const sections, const text_vmaddr = while (it.next() catch unreachable) |load_cmd| {
            if (load_cmd.hdr.cmd != .SEGMENT_64) continue;
            const segment_cmd = load_cmd.cast(macho.segment_command_64).?;
            if (!mem.eql(u8, segment_cmd.segName(), "__TEXT")) continue;
            break .{ load_cmd.getSections(), segment_cmd.vmaddr };
        } else unreachable; // assumes every loaded image has a `__TEXT` segment

        const vmaddr_slide = module.text_base - text_vmaddr;

        // Find the in-memory unwind sections by applying the slide to their vmaddrs.
        var opt_unwind_info: ?[]const u8 = null;
        var opt_eh_frame: ?[]const u8 = null;
        for (sections) |sect| {
            if (mem.eql(u8, sect.sectName(), "__unwind_info")) {
                const sect_ptr: [*]u8 = @ptrFromInt(@as(usize, @intCast(vmaddr_slide + sect.addr)));
                opt_unwind_info = sect_ptr[0..@intCast(sect.size)];
            } else if (mem.eql(u8, sect.sectName(), "__eh_frame")) {
                const sect_ptr: [*]u8 = @ptrFromInt(@as(usize, @intCast(vmaddr_slide + sect.addr)));
                opt_eh_frame = sect_ptr[0..@intCast(sect.size)];
            }
        }
        // Without `__eh_frame` there is no DWARF fallback, but compact unwind
        // info may still be usable on its own.
        const eh_frame = opt_eh_frame orelse return .{
            .vmaddr_slide = vmaddr_slide,
            .unwind_info = opt_unwind_info,
            .dwarf = null,
        };
        var dwarf: Dwarf.Unwind = .initSection(.eh_frame, @intFromPtr(eh_frame.ptr) - vmaddr_slide, eh_frame);
        errdefer dwarf.deinit(gpa);
        // We don't need lookups, so this call is just for scanning CIEs.
        dwarf.prepare(gpa, @sizeOf(usize), native_endian, false, true) catch |err| switch (err) {
            error.ReadFailed => unreachable, // it's all fixed buffers
            error.InvalidDebugInfo,
            error.MissingDebugInfo,
            error.OutOfMemory,
            => |e| return e,
            error.EndOfStream,
            error.Overflow,
            error.StreamTooLong,
            error.InvalidOperand,
            error.InvalidOpcode,
            error.InvalidOperation,
            => return error.InvalidDebugInfo,
            error.UnsupportedAddrSize,
            error.UnsupportedDwarfVersion,
            error.UnimplementedUserOpcode,
            => return error.UnsupportedDebugInfo,
        };

        return .{
            .vmaddr_slide = vmaddr_slide,
            .unwind_info = opt_unwind_info,
            .dwarf = dwarf,
        };
    }

    /// Returns the cached on-disk Mach-O file state, loading it on first use.
    /// A failed load is cached and re-returned on subsequent calls.
    fn getFile(module: *Module, gpa: Allocator) Error!*MachOFile {
        if (module.file == null) module.file = MachOFile.load(gpa, module.name, builtin.cpu.arch) catch |err| switch (err) {
            error.InvalidMachO, error.InvalidDwarf => error.InvalidDebugInfo,
            error.MissingDebugInfo, error.OutOfMemory, error.UnsupportedDebugInfo, error.ReadFailed => |e| e,
        };
        return if (module.file.?) |*f| f else |err| err;
    }
};
554
/// A symbol-table entry used for address-to-name lookups.
const MachoSymbol = struct {
    /// Index into the string table.
    strx: u32,
    /// Symbol start address (on-disk vmaddr).
    addr: u64,
    /// Value may be `unknown_ofile`.
    ofile: u32,

    const unknown_ofile = std.math.maxInt(u32);

    /// Comparator for sorting symbols by ascending address.
    fn addressLessThan(context: void, lhs: MachoSymbol, rhs: MachoSymbol) bool {
        _ = context;
        return lhs.addr < rhs.addr;
    }

    /// Binary search for the symbol covering `address`: the entry with the
    /// greatest `addr` that is `<= address`. Returns `null` when `address`
    /// precedes every symbol.
    /// Assumes that `symbols` is sorted in order of ascending `addr`.
    fn find(symbols: []const MachoSymbol, address: usize) ?*const MachoSymbol {
        if (symbols.len == 0) return null; // no potential match
        if (address < symbols[0].addr) return null; // address is before the lowest-address symbol
        // Invariant: symbols[lo].addr <= address, and (hi == symbols.len or
        // address < symbols[hi].addr).
        var lo: usize = 0;
        var hi: usize = symbols.len;
        while (hi - lo > 1) {
            const mid = lo + (hi - lo) / 2;
            if (symbols[mid].addr <= address) {
                lo = mid;
            } else {
                hi = mid;
            }
        }
        return &symbols[lo];
    }

    test find {
        const symbols: []const MachoSymbol = &.{
            .{ .addr = 100, .strx = undefined, .ofile = undefined },
            .{ .addr = 200, .strx = undefined, .ofile = undefined },
            .{ .addr = 300, .strx = undefined, .ofile = undefined },
        };

        try testing.expectEqual(null, find(symbols, 0));
        try testing.expectEqual(null, find(symbols, 99));
        try testing.expectEqual(&symbols[0], find(symbols, 100).?);
        try testing.expectEqual(&symbols[0], find(symbols, 150).?);
        try testing.expectEqual(&symbols[0], find(symbols, 199).?);

        try testing.expectEqual(&symbols[1], find(symbols, 200).?);
        try testing.expectEqual(&symbols[1], find(symbols, 250).?);
        try testing.expectEqual(&symbols[1], find(symbols, 299).?);

        try testing.expectEqual(&symbols[2], find(symbols, 300).?);
        try testing.expectEqual(&symbols[2], find(symbols, 301).?);
        try testing.expectEqual(&symbols[2], find(symbols, 5000).?);
    }
};
// Reference `MachoSymbol` so its nested `test` blocks are compiled and run.
test {
    _ = MachoSymbol;
}
608
/// Uses `mmap` to map the file at `path` into memory (read-only).
/// No cleanup is registered here; unmapping is left to the caller.
fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
    const file = std.fs.cwd().openFile(path, .{}) catch |open_err| switch (open_err) {
        error.FileNotFound => return error.MissingDebugInfo,
        else => return error.ReadFailed,
    };
    defer file.close();

    const end_pos = file.getEndPos() catch |stat_err| switch (stat_err) {
        error.Unexpected => |e| return e,
        else => return error.ReadFailed,
    };
    const file_len = std.math.cast(usize, end_pos) orelse return error.InvalidDebugInfo;

    const mapping = posix.mmap(
        null,
        file_len,
        posix.PROT.READ,
        .{ .TYPE = .SHARED },
        file.handle,
        0,
    ) catch |mmap_err| switch (mmap_err) {
        error.Unexpected => |e| return e,
        else => return error.ReadFailed,
    };
    return mapping;
}
635
636const std = @import("std");
637const Io = std.Io;
638const Allocator = std.mem.Allocator;
639const Dwarf = std.debug.Dwarf;
640const Error = std.debug.SelfInfoError;
641const MachOFile = std.debug.MachOFile;
642const assert = std.debug.assert;
643const posix = std.posix;
644const macho = std.macho;
645const mem = std.mem;
646const testing = std.testing;
647const dwarfRegNative = std.debug.Dwarf.SelfUnwinder.regNative;
648
649const builtin = @import("builtin");
650const native_endian = builtin.target.cpu.arch.endian();
651
652const SelfInfo = @This();