//! (Stray "master" branch label from the source paste, preserved as a comment.)
/// Serializes access to all fields below. `findModule` returns with this lock
/// held (shared or exclusive, as requested); callers release it.
rwlock: std.Thread.RwLock,

// Parallel bookkeeping rebuilt by `findModule` via `dl_iterate_phdr`.
modules: std.ArrayList(Module),
ranges: std.ArrayList(Module.Range),

/// Fixed-size cache of computed unwind rules, allocated lazily by
/// `unwindFrame`. On targets where `can_unwind` is false the payload type is
/// `noreturn`, so the field can only ever hold `null`.
unwind_cache: if (can_unwind) ?[]Dwarf.SelfUnwinder.CacheEntry else ?noreturn,
/// An empty `SelfInfo`. No allocation occurs until the first query.
pub const init: SelfInfo = .{
    .rwlock = .{},
    .modules = .empty,
    .ranges = .empty,
    .unwind_cache = null,
};
/// Frees all resources owned by `si`, including lazily-loaded per-module
/// state. Must not race with other threads using `si`.
pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
    for (si.modules.items) |*mod| {
        // `mod.unwind` is `?(Error!UnwindSections)`: skip the module if the
        // sections were never loaded (`orelse`) or failed to load (`catch`).
        unwind: {
            const u = &(mod.unwind orelse break :unwind catch break :unwind);
            for (u.buf[0..u.len]) |*unwind| unwind.deinit(gpa);
        }
        // Same pattern for the lazily-loaded ELF file.
        loaded: {
            const l = &(mod.loaded_elf orelse break :loaded catch break :loaded);
            l.file.deinit(gpa);
        }
    }

    si.modules.deinit(gpa);
    si.ranges.deinit(gpa);
    if (si.unwind_cache) |cache| gpa.free(cache);
}
30
/// Resolves `address` to a `std.debug.Symbol`, preferring DWARF debug info
/// and falling back to the ELF symtab when DWARF is unavailable or has no
/// entry. Acquires and releases `si.rwlock` internally.
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
    _ = io;
    // Exclusive lock: `getLoadedElf`/`dwarf.open` mutate module state lazily.
    const module = try si.findModule(gpa, address, .exclusive);
    defer si.rwlock.unlock();

    // Translate the runtime address back to the module's link-time vaddr.
    const vaddr = address - module.load_offset;

    const loaded_elf = try module.getLoadedElf(gpa);
    if (loaded_elf.file.dwarf) |*dwarf| {
        if (!loaded_elf.scanned_dwarf) {
            // First symbol query against this module: scan DWARF once, then
            // memoize via `scanned_dwarf` (set only on success, so a transient
            // failure is retried on the next call).
            dwarf.open(gpa, native_endian) catch |err| switch (err) {
                error.InvalidDebugInfo,
                error.MissingDebugInfo,
                error.OutOfMemory,
                => |e| return e,
                // Reader-level failures indicate malformed DWARF data.
                error.EndOfStream,
                error.Overflow,
                error.ReadFailed,
                error.StreamTooLong,
                => return error.InvalidDebugInfo,
            };
            loaded_elf.scanned_dwarf = true;
        }
        if (dwarf.getSymbol(gpa, native_endian, vaddr)) |sym| {
            return sym;
        } else |err| switch (err) {
            // No DWARF entry for this address; fall through to the symtab.
            error.MissingDebugInfo => {},

            error.InvalidDebugInfo,
            error.OutOfMemory,
            => |e| return e,

            error.ReadFailed,
            error.EndOfStream,
            error.Overflow,
            error.StreamTooLong,
            => return error.InvalidDebugInfo,
        }
    }
    // When DWARF is unavailable, fall back to searching the symtab.
    return loaded_elf.file.searchSymtab(gpa, vaddr) catch |err| switch (err) {
        error.NoSymtab, error.NoStrtab => return error.MissingDebugInfo,
        error.BadSymtab => return error.InvalidDebugInfo,
        error.OutOfMemory => |e| return e,
    };
}
/// Returns the dynamic linker's file path for the module containing
/// `address`. Fails with `error.MissingDebugInfo` when no module covers
/// `address` or the module is unnamed (the main executable).
pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
    const mod = try si.findModule(gpa, address, .shared);
    defer si.rwlock.unlockShared();
    const name = mod.name;
    if (name.len != 0) return name;
    return error.MissingDebugInfo;
}
/// Returns the load offset ("slide") of the module containing `address`,
/// i.e. the difference between runtime addresses and link-time vaddrs.
/// Fails with `error.MissingDebugInfo` when no module covers `address`.
pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
    const mod = try si.findModule(gpa, address, .shared);
    defer si.rwlock.unlockShared();
    const slide = mod.load_offset;
    return slide;
}
88
/// Whether `unwindFrame` is usable on the current target, decided at comptime
/// from the target's ABI, OS, and CPU architecture.
pub const can_unwind: bool = s: {
    // The DWARF code can't deal with ILP32 ABIs yet: https://github.com/ziglang/zig/issues/25447
    switch (builtin.target.abi) {
        .gnuabin32,
        .muslabin32,
        .gnux32,
        .muslx32,
        => break :s false,
        else => {},
    }

    // Notably, we are yet to support unwinding on ARM. There, unwinding is not done through
    // `.eh_frame`, but instead with the `.ARM.exidx` section, which has a different format.
    const archs: []const std.Target.Cpu.Arch = switch (builtin.target.os.tag) {
        // Not supported yet: arm
        .haiku => &.{
            .aarch64,
            .m68k,
            .riscv64,
            .x86,
            .x86_64,
        },
        // Not supported yet: arm/armeb/thumb/thumbeb, xtensa/xtensaeb
        .linux => &.{
            .aarch64,
            .aarch64_be,
            .arc,
            .csky,
            .loongarch64,
            .m68k,
            .mips,
            .mipsel,
            .mips64,
            .mips64el,
            .or1k,
            .riscv32,
            .riscv64,
            .s390x,
            .x86,
            .x86_64,
        },
        .serenity => &.{
            .aarch64,
            .x86_64,
            .riscv64,
        },

        .dragonfly => &.{
            .x86_64,
        },
        // Not supported yet: arm
        .freebsd => &.{
            .aarch64,
            .riscv64,
            .x86_64,
        },
        // Not supported yet: arm/armeb, mips64/mips64el
        .netbsd => &.{
            .aarch64,
            .aarch64_be,
            .m68k,
            .mips,
            .mipsel,
            .x86,
            .x86_64,
        },
        // Not supported yet: arm
        .openbsd => &.{
            .aarch64,
            .mips64,
            .mips64el,
            .riscv64,
            .x86,
            .x86_64,
        },

        .illumos => &.{
            .x86,
            .x86_64,
        },

        // This file is only compiled for the OSes listed above, so any other
        // tag cannot be reached at comptime.
        else => unreachable,
    };
    for (archs) |a| {
        if (builtin.target.cpu.arch == a) break :s true;
    }
    break :s false;
};
comptime {
    // `can_unwind` must never claim support that the DWARF layer lacks.
    if (can_unwind) {
        std.debug.assert(Dwarf.supportsUnwinding(&builtin.target));
    }
}
/// Per-unwind state consumed by `unwindFrame`; wraps the DWARF CFI stepper.
pub const UnwindContext = Dwarf.SelfUnwinder;
/// Advances `context` by one stack frame and returns the new PC.
///
/// Fast path: look up previously computed unwind rules in `si.unwind_cache`
/// under a shared lock. Slow path: find the module containing `context.pc`
/// under an exclusive lock, compute the rules from its unwind sections, cache
/// them, and step. Only callable when `can_unwind` is true.
pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
    comptime assert(can_unwind);

    // Fast path: cache probe needs only a shared lock.
    {
        si.rwlock.lockShared();
        defer si.rwlock.unlockShared();
        if (si.unwind_cache) |cache| {
            if (Dwarf.SelfUnwinder.CacheEntry.find(cache, context.pc)) |entry| {
                return context.next(gpa, entry);
            }
        }
    }

    // Slow path: exclusive lock, held for the remainder of the function.
    const module = try si.findModule(gpa, context.pc, .exclusive);
    defer si.rwlock.unlock();

    // Lazily allocate the fixed-size rule cache.
    if (si.unwind_cache == null) {
        si.unwind_cache = try gpa.alloc(Dwarf.SelfUnwinder.CacheEntry, 2048);
        @memset(si.unwind_cache.?, .empty);
    }

    const unwind_sections = try module.getUnwindSections(gpa);
    for (unwind_sections) |*unwind| {
        if (context.computeRules(gpa, unwind, module.load_offset, null)) |entry| {
            // Publish the freshly computed entry so future calls hit the fast path.
            entry.populate(si.unwind_cache.?);
            return context.next(gpa, &entry);
        } else |err| switch (err) {
            // This section doesn't cover `pc`; try the next one.
            error.MissingDebugInfo => continue,

            error.InvalidDebugInfo,
            error.UnsupportedDebugInfo,
            error.OutOfMemory,
            => |e| return e,

            // Reader/interpreter failures all indicate malformed CFI data.
            error.EndOfStream,
            error.StreamTooLong,
            error.ReadFailed,
            error.Overflow,
            error.InvalidOpcode,
            error.InvalidOperation,
            error.InvalidOperand,
            => return error.InvalidDebugInfo,

            error.UnimplementedUserOpcode,
            error.UnsupportedAddrSize,
            => return error.UnsupportedDebugInfo,
        }
    }
    return error.MissingDebugInfo;
}
233
/// One loaded object (main executable, shared library, or vDSO) as reported by
/// `dl_iterate_phdr`, with lazily-loaded unwind sections and ELF file.
const Module = struct {
    /// Base address the module was loaded at (`info.addr` from
    /// `dl_iterate_phdr`); added to link-time vaddrs to get runtime addresses.
    load_offset: usize,
    /// Path reported by the dynamic linker; empty means the main executable.
    name: []const u8,
    /// `.note.gnu.build-id` descriptor bytes found in a PT_NOTE segment, if any.
    build_id: ?[]const u8,
    /// In-memory contents of the PT_GNU_EH_FRAME segment, if present.
    gnu_eh_frame: ?[]const u8,

    /// `null` means unwind information has not yet been loaded.
    unwind: ?(Error!UnwindSections),

    /// `null` means the ELF file has not yet been loaded.
    loaded_elf: ?(Error!LoadedElf),

    const LoadedElf = struct {
        file: std.debug.ElfFile,
        // Whether `file.dwarf` has been opened/scanned yet; see `getSymbol`.
        scanned_dwarf: bool,
    };

    const UnwindSections = struct {
        // At most two sections are ever loaded: `.debug_frame` and/or `.eh_frame`.
        buf: [2]Dwarf.Unwind,
        len: usize,
    };

    const Range = struct {
        start: usize,
        len: usize,
        /// Index into `modules`
        module_index: usize,
    };

    /// Assumes we already hold an exclusive lock.
    fn getUnwindSections(mod: *Module, gpa: Allocator) Error![]Dwarf.Unwind {
        // Lazily load and memoize the result, including an error outcome.
        if (mod.unwind == null) mod.unwind = loadUnwindSections(mod, gpa);
        const us = &(mod.unwind.? catch |err| return err);
        return us.buf[0..us.len];
    }
    /// Locates and prepares the module's unwind sections, preferring the
    /// in-memory PT_GNU_EH_FRAME segment over loading the ELF file from disk.
    fn loadUnwindSections(mod: *Module, gpa: Allocator) Error!UnwindSections {
        var us: UnwindSections = .{
            .buf = undefined,
            .len = 0,
        };
        if (mod.gnu_eh_frame) |section_bytes| {
            // Convert the segment's runtime address back to its link-time vaddr.
            const section_vaddr: u64 = @intFromPtr(section_bytes.ptr) - mod.load_offset;
            const header = Dwarf.Unwind.EhFrameHeader.parse(section_vaddr, section_bytes, @sizeOf(usize), native_endian) catch |err| switch (err) {
                error.ReadFailed => unreachable, // it's all fixed buffers
                error.InvalidDebugInfo => |e| return e,
                error.EndOfStream, error.Overflow => return error.InvalidDebugInfo,
                error.UnsupportedAddrSize => return error.UnsupportedDebugInfo,
            };
            us.buf[us.len] = .initEhFrameHdr(header, section_vaddr, @ptrFromInt(@as(usize, @intCast(mod.load_offset + header.eh_frame_vaddr))));
            us.len += 1;
        } else {
            // There is no `.eh_frame_hdr` section. There may still be an `.eh_frame` or `.debug_frame`
            // section, but we'll have to load the binary to get at it.
            const loaded = try mod.getLoadedElf(gpa);
            // If both are present, we can't just pick one -- the info could be split between them.
            // `.debug_frame` is likely to be the more complete section, so we'll prioritize that one.
            if (loaded.file.debug_frame) |*debug_frame| {
                us.buf[us.len] = .initSection(.debug_frame, debug_frame.vaddr, debug_frame.bytes);
                us.len += 1;
            }
            if (loaded.file.eh_frame) |*eh_frame| {
                us.buf[us.len] = .initSection(.eh_frame, eh_frame.vaddr, eh_frame.bytes);
                us.len += 1;
            }
        }
        // On any failure below, release whatever was loaded above.
        errdefer for (us.buf[0..us.len]) |*u| u.deinit(gpa);
        for (us.buf[0..us.len]) |*u| u.prepare(gpa, @sizeOf(usize), native_endian, true, false) catch |err| switch (err) {
            error.ReadFailed => unreachable, // it's all fixed buffers
            error.InvalidDebugInfo,
            error.MissingDebugInfo,
            error.OutOfMemory,
            => |e| return e,
            error.EndOfStream,
            error.Overflow,
            error.StreamTooLong,
            error.InvalidOperand,
            error.InvalidOpcode,
            error.InvalidOperation,
            => return error.InvalidDebugInfo,
            error.UnsupportedAddrSize,
            error.UnsupportedDwarfVersion,
            error.UnimplementedUserOpcode,
            => return error.UnsupportedDebugInfo,
        };
        return us;
    }

    /// Assumes we already hold an exclusive lock.
    fn getLoadedElf(mod: *Module, gpa: Allocator) Error!*LoadedElf {
        // Lazily load and memoize the result, including an error outcome.
        if (mod.loaded_elf == null) mod.loaded_elf = loadElf(mod, gpa);
        return if (mod.loaded_elf.?) |*elf| elf else |err| err;
    }
    /// Opens the module's ELF file from disk (resolving the self-exe path for
    /// the unnamed main module) and validates it matches the running process.
    fn loadElf(mod: *Module, gpa: Allocator) Error!LoadedElf {
        const load_result = if (mod.name.len > 0) res: {
            var file = std.fs.cwd().openFile(mod.name, .{}) catch return error.MissingDebugInfo;
            defer file.close();
            break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(mod.name));
        } else res: {
            // The unnamed module is the main executable; ask the OS for its path.
            const path = std.fs.selfExePathAlloc(gpa) catch |err| switch (err) {
                error.OutOfMemory => |e| return e,
                else => return error.ReadFailed,
            };
            defer gpa.free(path);
            var file = std.fs.cwd().openFile(path, .{}) catch return error.MissingDebugInfo;
            defer file.close();
            break :res std.debug.ElfFile.load(gpa, file, mod.build_id, &.native(path));
        };

        // Collapse ElfFile.load's error set into this file's `Error` set.
        var elf_file = load_result catch |err| switch (err) {
            error.OutOfMemory,
            error.Unexpected,
            error.Canceled,
            => |e| return e,

            error.Overflow,
            error.TruncatedElfFile,
            error.InvalidCompressedSection,
            error.InvalidElfMagic,
            error.InvalidElfVersion,
            error.InvalidElfClass,
            error.InvalidElfEndian,
            => return error.InvalidDebugInfo,

            error.SystemResources,
            error.MemoryMappingNotSupported,
            error.AccessDenied,
            error.LockedMemoryLimitExceeded,
            error.ProcessFdQuotaExceeded,
            error.SystemFdQuotaExceeded,
            error.Streaming,
            => return error.ReadFailed,
        };
        errdefer elf_file.deinit(gpa);

        // The on-disk file must match the running process's endianness and
        // word size, or its vaddrs wouldn't correspond to what's mapped.
        if (elf_file.endian != native_endian) return error.InvalidDebugInfo;
        if (elf_file.is_64 != (@sizeOf(usize) == 8)) return error.InvalidDebugInfo;

        return .{
            .file = elf_file,
            .scanned_dwarf = false,
        };
    }
};
377
/// Returns the module whose PT_LOAD range contains `address`, rebuilding the
/// module/range lists via `dl_iterate_phdr` on a miss (a new shared object may
/// have been loaded since the last scan).
///
/// On success, returns with `si.rwlock` held in the requested mode; the caller
/// must release it. On error the lock has been released.
fn findModule(si: *SelfInfo, gpa: Allocator, address: usize, lock: enum { shared, exclusive }) Error!*Module {
    // With the requested lock, scan the module ranges looking for `address`.
    switch (lock) {
        .shared => si.rwlock.lockShared(),
        .exclusive => si.rwlock.lock(),
    }
    for (si.ranges.items) |*range| {
        if (address >= range.start and address < range.start + range.len) {
            return &si.modules.items[range.module_index];
        }
    }
    // The address wasn't in a known range. We will rebuild the module/range lists, since it's possible
    // a new module was loaded. Upgrade to an exclusive lock if necessary.
    switch (lock) {
        .shared => {
            si.rwlock.unlockShared();
            si.rwlock.lock();
        },
        .exclusive => {},
    }
    // Rebuild module list with the exclusive lock.
    {
        errdefer si.rwlock.unlock();
        // Free lazily-loaded per-module state before discarding the list
        // (same cleanup pattern as `deinit`).
        for (si.modules.items) |*mod| {
            unwind: {
                const u = &(mod.unwind orelse break :unwind catch break :unwind);
                for (u.buf[0..u.len]) |*unwind| unwind.deinit(gpa);
            }
            loaded: {
                const l = &(mod.loaded_elf orelse break :loaded catch break :loaded);
                l.file.deinit(gpa);
            }
        }
        si.modules.clearRetainingCapacity();
        si.ranges.clearRetainingCapacity();
        var ctx: DlIterContext = .{ .si = si, .gpa = gpa };
        try std.posix.dl_iterate_phdr(&ctx, error{OutOfMemory}, DlIterContext.callback);
    }
    // Downgrade the lock back to shared if necessary.
    // NOTE(review): the unlock/lockShared pair is not an atomic downgrade;
    // another thread may rebuild the lists in the gap. That appears benign
    // because the ranges are re-scanned below under the newly acquired lock.
    switch (lock) {
        .shared => {
            si.rwlock.unlock();
            si.rwlock.lockShared();
        },
        .exclusive => {},
    }
    // Scan the newly rebuilt module ranges.
    for (si.ranges.items) |*range| {
        if (address >= range.start and address < range.start + range.len) {
            return &si.modules.items[range.module_index];
        }
    }
    // Still nothing; unlock and error.
    switch (lock) {
        .shared => si.rwlock.unlockShared(),
        .exclusive => si.rwlock.unlock(),
    }
    return error.MissingDebugInfo;
}
/// Context threaded through `std.posix.dl_iterate_phdr` while rebuilding the
/// module and range lists; see `findModule`.
const DlIterContext = struct {
    si: *SelfInfo,
    gpa: Allocator,

    /// Invoked once per loaded object: appends one `Module` plus one
    /// `Module.Range` per PT_LOAD segment to `context.si`.
    fn callback(info: *std.posix.dl_phdr_info, size: usize, context: *@This()) !void {
        _ = size;

        var build_id: ?[]const u8 = null;
        var gnu_eh_frame: ?[]const u8 = null;

        // Populate `build_id` and `gnu_eh_frame`
        for (info.phdr[0..info.phnum]) |phdr| {
            switch (phdr.type) {
                .NOTE => {
                    // Look for .note.gnu.build-id
                    // NOTE(review): only the first note entry in each PT_NOTE
                    // segment is examined — assumes the build-id note comes
                    // first in its segment; confirm against real producers.
                    const segment_ptr: [*]const u8 = @ptrFromInt(info.addr + phdr.vaddr);
                    var r: std.Io.Reader = .fixed(segment_ptr[0..phdr.memsz]);
                    const name_size = r.takeInt(u32, native_endian) catch continue;
                    const desc_size = r.takeInt(u32, native_endian) catch continue;
                    const note_type = r.takeInt(u32, native_endian) catch continue;
                    const name = r.take(name_size) catch continue;
                    if (note_type != std.elf.NT_GNU_BUILD_ID) continue;
                    if (!std.mem.eql(u8, name, "GNU\x00")) continue;
                    const desc = r.take(desc_size) catch continue;
                    build_id = desc;
                },
                std.elf.PT.GNU_EH_FRAME => {
                    // The eh_frame_hdr data is already mapped; keep a direct slice.
                    const segment_ptr: [*]const u8 = @ptrFromInt(info.addr + phdr.vaddr);
                    gnu_eh_frame = segment_ptr[0..phdr.memsz];
                },
                else => {},
            }
        }

        const gpa = context.gpa;
        const si = context.si;

        const module_index = si.modules.items.len;
        try si.modules.append(gpa, .{
            .load_offset = info.addr,
            // Android libc uses NULL instead of "" to mark the main program
            .name = std.mem.sliceTo(info.name, 0) orelse "",
            .build_id = build_id,
            .gnu_eh_frame = gnu_eh_frame,
            .unwind = null,
            .loaded_elf = null,
        });

        for (info.phdr[0..info.phnum]) |phdr| {
            if (phdr.type != .LOAD) continue;
            try context.si.ranges.append(gpa, .{
                // Overflowing addition handles VSDOs having p_vaddr = 0xffffffffff700000
                .start = info.addr +% phdr.vaddr,
                .len = phdr.memsz,
                .module_index = module_index,
            });
        }
    }
};
496
const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const Dwarf = std.debug.Dwarf;
const Error = std.debug.SelfInfoError;
const assert = std.debug.assert;

const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const SelfInfo = @This();