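//! Debug information about the current process on Windows: module discovery via Toolhelp
//! snapshots, symbolization from PDB and/or DWARF, and stack unwinding via `RtlVirtualUnwind`.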
mutex: std.Thread.Mutex,
modules: std.ArrayList(Module),
module_name_arena: std.heap.ArenaAllocator.State,

pub const init: SelfInfo = .{
    .mutex = .{},
    .modules = .empty,
    .module_name_arena = .{},
};
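/// Frees any loaded per-module debug info, the module list itself, and the arena that backs the
/// module names.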
pub fn deinit(si: *SelfInfo, gpa: Allocator) void {
    for (si.modules.items) |*module| {
        di: {
            const di = &(module.di orelse break :di catch break :di);
            di.deinit(gpa);
        }
    }
    si.modules.deinit(gpa);

    var module_name_arena = si.module_name_arena.promote(gpa);
    module_name_arena.deinit();
}

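/// Returns symbol information for `address`, finding the containing module and lazily loading its
/// debug info (PDB and/or DWARF) on first use.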
pub fn getSymbol(si: *SelfInfo, gpa: Allocator, io: Io, address: usize) Error!std.debug.Symbol {
    si.mutex.lock();
    defer si.mutex.unlock();
    const module = try si.findModule(gpa, address);
    const di = try module.getDebugInfo(gpa, io);
    return di.getSymbol(gpa, address - module.base_address);
}
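/// Returns the name of the module containing `address`, as reported by the Toolhelp snapshot.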
pub fn getModuleName(si: *SelfInfo, gpa: Allocator, address: usize) Error![]const u8 {
    si.mutex.lock();
    defer si.mutex.unlock();
    const module = try si.findModule(gpa, address);
    return module.name;
}
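/// Returns the base address at which the module containing `address` is loaded.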
pub fn getModuleSlide(si: *SelfInfo, gpa: Allocator, address: usize) Error!usize {
    si.mutex.lock();
    defer si.mutex.unlock();
    const module = try si.findModule(gpa, address);
    return module.base_address;
}

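/// Whether this file's `RtlVirtualUnwind`-based unwinder is usable on the target architecture;
/// see the comment on the `.x86` case below.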
pub const can_unwind: bool = switch (builtin.cpu.arch) {
    else => true,
    // On x86, `RtlVirtualUnwind` does not exist. We could in theory use `RtlCaptureStackBackTrace`
    // instead, but on x86, it turns out that function is just... doing FP unwinding with ebp! It's
    // hard to find implementation details to confirm that, but the most authoritative source I have
    // is an entry in the LLVM mailing list from 2020/08/16 which contains this quote:
    //
    // > x86 doesn't have what most architectures would consider an "unwinder" in the sense of
    // > restoring registers; there is simply a linked list of frames that participate in SEH and
    // > that desire to be called for a dynamic unwind operation, so RtlCaptureStackBackTrace
    // > assumes that EBP-based frames are in use and walks an EBP-based frame chain on x86 - not
    // > all x86 code is written with EBP-based frames so while even though we generally build the
    // > OS that way, you might always run the risk of encountering external code that uses EBP as a
    // > general purpose register for which such an unwind attempt for a stack trace would fail.
    //
    // Regardless, it's easy to effectively confirm this hypothesis just by compiling some code with
    // `-fomit-frame-pointer -OReleaseFast` and observing that `RtlCaptureStackBackTrace` returns an
    // empty trace when it's called in such an application. Note that without `-OReleaseFast` or
    // similar, LLVM seems reluctant to ever clobber ebp, so you'll get a trace returned which just
    // contains all of the kernel32/ntdll frames but none of your own. Don't be deceived---this is
    // just coincidental!
    //
    // Anyway, the point is, the only stack walking primitive on x86-windows is FP unwinding. We
    // *could* ask Microsoft to do that for us with `RtlCaptureStackBackTrace`... but better to just
    // use our existing FP unwinder in `std.debug`!
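    //
    // For illustration only (a hypothetical sketch; the exact Zig binding for
    // `RtlCaptureStackBackTrace` and its parameter types are not assumed here), the experiment
    // amounts to building something like this for x86-windows with
    // `-fomit-frame-pointer -OReleaseFast`:
    //
    //     var addresses: [32]?*anyopaque = undefined;
    //     const n = RtlCaptureStackBackTrace(0, addresses.len, @ptrCast(&addresses), null);
    //     // With ebp clobbered by the optimizer, the EBP chain walk terminates immediately
    //     // and `n` comes back as 0.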
    .x86 => false,
};
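/// Unwind state threaded through successive `unwindFrame` calls: the pc to look up next, the
/// current thread context, and the `UNWIND_HISTORY_TABLE` cache passed to
/// `RtlLookupFunctionEntry`.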
pub const UnwindContext = struct {
    pc: usize,
    cur: windows.CONTEXT,
    history_table: windows.UNWIND_HISTORY_TABLE,
    pub fn init(ctx: *const std.debug.cpu_context.Native) UnwindContext {
        return .{
            .pc = @returnAddress(),
            .cur = switch (builtin.cpu.arch) {
                .x86_64 => std.mem.zeroInit(windows.CONTEXT, .{
                    .Rax = ctx.gprs.get(.rax),
                    .Rcx = ctx.gprs.get(.rcx),
                    .Rdx = ctx.gprs.get(.rdx),
                    .Rbx = ctx.gprs.get(.rbx),
                    .Rsp = ctx.gprs.get(.rsp),
                    .Rbp = ctx.gprs.get(.rbp),
                    .Rsi = ctx.gprs.get(.rsi),
                    .Rdi = ctx.gprs.get(.rdi),
                    .R8 = ctx.gprs.get(.r8),
                    .R9 = ctx.gprs.get(.r9),
                    .R10 = ctx.gprs.get(.r10),
                    .R11 = ctx.gprs.get(.r11),
                    .R12 = ctx.gprs.get(.r12),
                    .R13 = ctx.gprs.get(.r13),
                    .R14 = ctx.gprs.get(.r14),
                    .R15 = ctx.gprs.get(.r15),
                    .Rip = ctx.gprs.get(.rip),
                }),
                .aarch64 => .{
                    .ContextFlags = 0,
                    .Cpsr = 0,
                    .DUMMYUNIONNAME = .{ .X = ctx.x },
                    .Sp = ctx.sp,
                    .Pc = ctx.pc,
                    .V = @splat(.{ .B = @splat(0) }),
                    .Fpcr = 0,
                    .Fpsr = 0,
                    .Bcr = @splat(0),
                    .Bvr = @splat(0),
                    .Wcr = @splat(0),
                    .Wvr = @splat(0),
                },
                .thumb => .{
                    .ContextFlags = 0,
                    .R0 = ctx.r[0],
                    .R1 = ctx.r[1],
                    .R2 = ctx.r[2],
                    .R3 = ctx.r[3],
                    .R4 = ctx.r[4],
                    .R5 = ctx.r[5],
                    .R6 = ctx.r[6],
                    .R7 = ctx.r[7],
                    .R8 = ctx.r[8],
                    .R9 = ctx.r[9],
                    .R10 = ctx.r[10],
                    .R11 = ctx.r[11],
                    .R12 = ctx.r[12],
                    .Sp = ctx.r[13],
                    .Lr = ctx.r[14],
                    .Pc = ctx.r[15],
                    .Cpsr = 0,
                    .Fpcsr = 0,
                    .Padding = 0,
                    .DUMMYUNIONNAME = .{ .S = @splat(0) },
                    .Bvr = @splat(0),
                    .Bcr = @splat(0),
                    .Wvr = @splat(0),
                    .Wcr = @splat(0),
                    .Padding2 = @splat(0),
                },
                else => comptime unreachable,
            },
            .history_table = std.mem.zeroes(windows.UNWIND_HISTORY_TABLE),
        };
    }
    pub fn deinit(ctx: *UnwindContext, gpa: Allocator) void {
        _ = ctx;
        _ = gpa;
    }
    pub fn getFp(ctx: *UnwindContext) usize {
        return ctx.cur.getRegs().bp;
    }
};
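/// Unwinds a single frame. If `RtlLookupFunctionEntry` finds unwind info for the current pc, the
/// context is advanced with `RtlVirtualUnwind`; otherwise the frame is assumed to belong to a leaf
/// function and a plain return address is popped. Returns the new return address, or 0 once the
/// stack pointer leaves the bounds recorded in the TEB.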
pub fn unwindFrame(si: *SelfInfo, gpa: Allocator, context: *UnwindContext) Error!usize {
    _ = si;
    _ = gpa;

    const current_regs = context.cur.getRegs();
    var image_base: windows.DWORD64 = undefined;
    if (windows.ntdll.RtlLookupFunctionEntry(current_regs.ip, &image_base, &context.history_table)) |runtime_function| {
        var handler_data: ?*anyopaque = null;
        var establisher_frame: u64 = undefined;
        _ = windows.ntdll.RtlVirtualUnwind(
            windows.UNW_FLAG_NHANDLER,
            image_base,
            current_regs.ip,
            runtime_function,
            &context.cur,
            &handler_data,
            &establisher_frame,
            null,
        );
    } else {
        // Leaf function: there is no unwind info, so the return address sits at the top of the stack.
        context.cur.setIp(@as(*const usize, @ptrFromInt(current_regs.sp)).*);
        context.cur.setSp(current_regs.sp + @sizeOf(usize));
    }

    const next_regs = context.cur.getRegs();
    const tib = &windows.teb().NtTib;
    if (next_regs.sp < @intFromPtr(tib.StackLimit) or next_regs.sp > @intFromPtr(tib.StackBase)) {
        context.pc = 0;
        return 0;
    }
    // Like `DwarfUnwindContext.unwindFrame`, adjust our next lookup pc in case the `call` was this
    // function's last instruction, making `next_regs.ip` one byte past its end.
    context.pc = next_regs.ip -| 1;
    return next_regs.ip;
}

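/// One loaded image (the executable or a DLL) discovered from a Toolhelp snapshot. `di` is
/// populated lazily by `getDebugInfo` and caches either the loaded debug info or the error that
/// loading produced.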
const Module = struct {
    base_address: usize,
    size: u32,
    name: []const u8,
    handle: windows.HMODULE,

    di: ?(Error!DebugInfo),

    const DebugInfo = struct {
        arena: std.heap.ArenaAllocator.State,
        io: Io,
        coff_image_base: u64,
        mapped_file: ?MappedFile,
        dwarf: ?Dwarf,
        pdb: ?Pdb,
        coff_section_headers: []coff.SectionHeader,

        const MappedFile = struct {
            file: fs.File,
            section_handle: windows.HANDLE,
            section_view: []const u8,
            fn deinit(mf: *const MappedFile) void {
                const process_handle = windows.GetCurrentProcess();
                assert(windows.ntdll.NtUnmapViewOfSection(process_handle, @constCast(mf.section_view.ptr)) == .SUCCESS);
                windows.CloseHandle(mf.section_handle);
                mf.file.close();
            }
        };

        fn deinit(di: *DebugInfo, gpa: Allocator) void {
            const io = di.io;
            if (di.dwarf) |*dwarf| dwarf.deinit(gpa);
            if (di.pdb) |*pdb| {
                pdb.file_reader.file.close(io);
                pdb.deinit();
            }
            if (di.mapped_file) |*mf| mf.deinit();

            var arena = di.arena.promote(gpa);
            arena.deinit();
        }

        fn getSymbol(di: *DebugInfo, gpa: Allocator, vaddr: usize) Error!std.debug.Symbol {
            pdb: {
                const pdb = &(di.pdb orelse break :pdb);
                var coff_section: *align(1) const coff.SectionHeader = undefined;
                const mod_index = for (pdb.sect_contribs) |sect_contrib| {
                    if (sect_contrib.section > di.coff_section_headers.len) continue;
                    // Remember that SectionContribEntry.Section is 1-based.
                    coff_section = &di.coff_section_headers[sect_contrib.section - 1];

                    const vaddr_start = coff_section.virtual_address + sect_contrib.offset;
                    const vaddr_end = vaddr_start + sect_contrib.size;
                    if (vaddr >= vaddr_start and vaddr < vaddr_end) {
                        break sect_contrib.module_index;
                    }
                } else {
                    // we have no information to add to the address
                    break :pdb;
                };
                const module = pdb.getModule(mod_index) catch |err| switch (err) {
                    error.InvalidDebugInfo,
                    error.MissingDebugInfo,
                    error.OutOfMemory,
                    => |e| return e,

                    error.ReadFailed,
                    error.EndOfStream,
                    => return error.InvalidDebugInfo,
                } orelse {
                    return error.InvalidDebugInfo; // bad module index
                };
                return .{
                    .name = pdb.getSymbolName(module, vaddr - coff_section.virtual_address),
                    .compile_unit_name = fs.path.basename(module.obj_file_name),
                    .source_location = pdb.getLineNumberInfo(module, vaddr - coff_section.virtual_address) catch null,
                };
            }
            dwarf: {
                const dwarf = &(di.dwarf orelse break :dwarf);
                const dwarf_address = vaddr + di.coff_image_base;
                return dwarf.getSymbol(gpa, native_endian, dwarf_address) catch |err| switch (err) {
                    error.MissingDebugInfo => break :dwarf,

                    error.InvalidDebugInfo,
                    error.OutOfMemory,
                    => |e| return e,

                    error.ReadFailed,
                    error.EndOfStream,
                    error.Overflow,
                    error.StreamTooLong,
                    => return error.InvalidDebugInfo,
                };
            }
            return error.MissingDebugInfo;
        }
    };

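    /// Returns the memoized debug info for this module, calling `loadDebugInfo` on first use and
    /// caching the result (including a failure) in `module.di`.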
    fn getDebugInfo(module: *Module, gpa: Allocator, io: Io) Error!*DebugInfo {
        if (module.di == null) module.di = loadDebugInfo(module, gpa, io);
        return if (module.di.?) |*di| di else |err| err;
    }
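    /// Parses the COFF image already mapped by the loader (remapping the file from disk when the
    /// string table is required), then loads whichever of DWARF sections and a matching PDB are
    /// present.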
    fn loadDebugInfo(module: *const Module, gpa: Allocator, io: Io) Error!DebugInfo {
        const mapped_ptr: [*]const u8 = @ptrFromInt(module.base_address);
        const mapped = mapped_ptr[0..module.size];
        var coff_obj = coff.Coff.init(mapped, true) catch return error.InvalidDebugInfo;

        var arena_instance: std.heap.ArenaAllocator = .init(gpa);
        errdefer arena_instance.deinit();
        const arena = arena_instance.allocator();

        // The string table is not mapped into memory by the loader, so if a section name is in the
        // string table then we have to map the full image file from disk. This can happen when
        // a binary is produced with -gdwarf, since the section names are longer than 8 bytes.
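        // (A COFF section header only reserves 8 bytes for the name; longer names such as
        // ".debug_info" are stored as a "/<offset>" reference into the string table.)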
        const mapped_file: ?DebugInfo.MappedFile = mapped: {
            if (!coff_obj.strtabRequired()) break :mapped null;
            var name_buffer: [windows.PATH_MAX_WIDE + 4:0]u16 = undefined;
            name_buffer[0..4].* = .{ '\\', '?', '?', '\\' }; // openFileAbsoluteW requires the prefix to be present
            const process_handle = windows.GetCurrentProcess();
            const len = windows.kernel32.GetModuleFileNameExW(
                process_handle,
                module.handle,
                name_buffer[4..],
                windows.PATH_MAX_WIDE,
            );
            if (len == 0) return error.MissingDebugInfo;
            const name_w = name_buffer[0 .. len + 4 :0];
            var threaded: Io.Threaded = .init_single_threaded;
            const coff_file = threaded.dirOpenFileWtf16(null, name_w, .{}) catch |err| switch (err) {
                error.Canceled => |e| return e,
                error.Unexpected => |e| return e,
                error.FileNotFound => return error.MissingDebugInfo,

                error.FileTooBig,
                error.IsDir,
                error.NotDir,
                error.SymLinkLoop,
                error.NameTooLong,
                error.BadPathName,
                => return error.InvalidDebugInfo,

                error.SystemResources,
                error.WouldBlock,
                error.AccessDenied,
                error.ProcessNotFound,
                error.PermissionDenied,
                error.NoSpaceLeft,
                error.DeviceBusy,
                error.NoDevice,
                error.SharingViolation,
                error.PathAlreadyExists,
                error.PipeBusy,
                error.NetworkNotFound,
                error.AntivirusInterference,
                error.ProcessFdQuotaExceeded,
                error.SystemFdQuotaExceeded,
                error.FileLocksNotSupported,
                error.FileBusy,
                => return error.ReadFailed,
            };
            errdefer coff_file.close(io);
            var section_handle: windows.HANDLE = undefined;
            const create_section_rc = windows.ntdll.NtCreateSection(
                &section_handle,
                windows.STANDARD_RIGHTS_REQUIRED | windows.SECTION_QUERY | windows.SECTION_MAP_READ,
                null,
                null,
                windows.PAGE_READONLY,
                // The documentation states that if no AllocationAttribute is specified, then SEC_COMMIT is the default.
                // In practice, this isn't the case and specifying 0 will result in INVALID_PARAMETER_6.
                windows.SEC_COMMIT,
                coff_file.handle,
            );
            if (create_section_rc != .SUCCESS) return error.MissingDebugInfo;
            errdefer windows.CloseHandle(section_handle);
            var coff_len: usize = 0;
            var section_view_ptr: ?[*]const u8 = null;
            const map_section_rc = windows.ntdll.NtMapViewOfSection(
                section_handle,
                process_handle,
                @ptrCast(&section_view_ptr),
                null,
                0,
                null,
                &coff_len,
                .ViewUnmap,
                0,
                windows.PAGE_READONLY,
            );
            if (map_section_rc != .SUCCESS) return error.MissingDebugInfo;
            errdefer assert(windows.ntdll.NtUnmapViewOfSection(process_handle, @constCast(section_view_ptr.?)) == .SUCCESS);
            const section_view = section_view_ptr.?[0..coff_len];
            coff_obj = coff.Coff.init(section_view, false) catch return error.InvalidDebugInfo;
            break :mapped .{
                .file = .adaptFromNewApi(coff_file),
                .section_handle = section_handle,
                .section_view = section_view,
            };
        };
        errdefer if (mapped_file) |*mf| mf.deinit();

        const coff_image_base = coff_obj.getImageBase();

        var opt_dwarf: ?Dwarf = dwarf: {
            if (coff_obj.getSectionByName(".debug_info") == null) break :dwarf null;

            var sections: Dwarf.SectionArray = undefined;
            inline for (@typeInfo(Dwarf.Section.Id).@"enum".fields, 0..) |section, i| {
                sections[i] = if (coff_obj.getSectionByName("." ++ section.name)) |section_header| .{
                    .data = try coff_obj.getSectionDataAlloc(section_header, arena),
                    .owned = false,
                } else null;
            }
            break :dwarf .{ .sections = sections };
        };
        errdefer if (opt_dwarf) |*dwarf| dwarf.deinit(gpa);

        if (opt_dwarf) |*dwarf| {
            dwarf.open(gpa, native_endian) catch |err| switch (err) {
                error.Overflow,
                error.EndOfStream,
                error.StreamTooLong,
                error.ReadFailed,
                => return error.InvalidDebugInfo,

                error.InvalidDebugInfo,
                error.MissingDebugInfo,
                error.OutOfMemory,
                => |e| return e,
            };
        }

        var opt_pdb: ?Pdb = pdb: {
            const path = coff_obj.getPdbPath() catch {
                return error.InvalidDebugInfo;
            } orelse {
                break :pdb null;
            };
            const pdb_file_open_result = if (fs.path.isAbsolute(path)) res: {
                break :res std.fs.cwd().openFile(path, .{});
            } else res: {
                const self_dir = fs.selfExeDirPathAlloc(gpa) catch |err| switch (err) {
                    error.OutOfMemory, error.Unexpected => |e| return e,
                    else => return error.ReadFailed,
                };
                defer gpa.free(self_dir);
                const abs_path = try fs.path.join(gpa, &.{ self_dir, path });
                defer gpa.free(abs_path);
                break :res std.fs.cwd().openFile(abs_path, .{});
            };
            const pdb_file = pdb_file_open_result catch |err| switch (err) {
                error.FileNotFound, error.IsDir => break :pdb null,
                else => return error.ReadFailed,
            };
            errdefer pdb_file.close();

            const pdb_reader = try arena.create(Io.File.Reader);
            pdb_reader.* = pdb_file.reader(io, try arena.alloc(u8, 4096));

            var pdb = Pdb.init(gpa, pdb_reader) catch |err| switch (err) {
                error.OutOfMemory, error.ReadFailed, error.Unexpected => |e| return e,
                else => return error.InvalidDebugInfo,
            };
            errdefer pdb.deinit();
            pdb.parseInfoStream() catch |err| switch (err) {
                error.UnknownPDBVersion => return error.UnsupportedDebugInfo,
                error.EndOfStream => return error.InvalidDebugInfo,

                error.InvalidDebugInfo,
                error.MissingDebugInfo,
                error.OutOfMemory,
                error.ReadFailed,
                => |e| return e,
            };
            pdb.parseDbiStream() catch |err| switch (err) {
                error.UnknownPDBVersion => return error.UnsupportedDebugInfo,

                error.EndOfStream,
                error.EOF,
                error.StreamTooLong,
                error.WriteFailed,
                => return error.InvalidDebugInfo,

                error.InvalidDebugInfo,
                error.OutOfMemory,
                error.ReadFailed,
                => |e| return e,
            };

            if (!std.mem.eql(u8, &coff_obj.guid, &pdb.guid) or coff_obj.age != pdb.age)
                return error.InvalidDebugInfo;

            break :pdb pdb;
        };
        errdefer if (opt_pdb) |*pdb| {
            pdb.file_reader.file.close(io);
            pdb.deinit();
        };

        const coff_section_headers: []coff.SectionHeader = if (opt_pdb != null) csh: {
            break :csh try coff_obj.getSectionHeadersAlloc(arena);
        } else &.{};

        return .{
            .arena = arena_instance.state,
            .io = io,
            .coff_image_base = coff_image_base,
            .mapped_file = mapped_file,
            .dwarf = opt_dwarf,
            .pdb = opt_pdb,
            .coff_section_headers = coff_section_headers,
        };
    }
};

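/// Returns the module whose address range contains `address`, refreshing the module list from a
/// fresh Toolhelp snapshot if no currently-known module matches.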
/// Assumes we already hold `si.mutex`.
fn findModule(si: *SelfInfo, gpa: Allocator, address: usize) error{ MissingDebugInfo, OutOfMemory, Unexpected }!*Module {
    for (si.modules.items) |*mod| {
        if (address >= mod.base_address and address < mod.base_address + mod.size) {
            return mod;
        }
    }

    // A new module might have been loaded; rebuild the list.
    {
        for (si.modules.items) |*mod| {
            const di = &(mod.di orelse continue catch continue);
            di.deinit(gpa);
        }
        si.modules.clearRetainingCapacity();

        var module_name_arena = si.module_name_arena.promote(gpa);
        defer si.module_name_arena = module_name_arena.state;
        _ = module_name_arena.reset(.retain_capacity);

        const handle = windows.kernel32.CreateToolhelp32Snapshot(windows.TH32CS_SNAPMODULE | windows.TH32CS_SNAPMODULE32, 0);
        if (handle == windows.INVALID_HANDLE_VALUE) {
            return windows.unexpectedError(windows.GetLastError());
        }
        defer windows.CloseHandle(handle);
        var entry: windows.MODULEENTRY32 = undefined;
        entry.dwSize = @sizeOf(windows.MODULEENTRY32);
        var result = windows.kernel32.Module32First(handle, &entry);
        while (result != 0) : (result = windows.kernel32.Module32Next(handle, &entry)) {
            try si.modules.append(gpa, .{
                .base_address = @intFromPtr(entry.modBaseAddr),
                .size = entry.modBaseSize,
                .name = try module_name_arena.allocator().dupe(
                    u8,
                    std.mem.sliceTo(&entry.szModule, 0),
                ),
                .handle = entry.hModule,
                .di = null,
            });
        }
    }

    for (si.modules.items) |*mod| {
        if (address >= mod.base_address and address < mod.base_address + mod.size) {
            return mod;
        }
    }

    return error.MissingDebugInfo;
}

const std = @import("std");
const Io = std.Io;
const Allocator = std.mem.Allocator;
const Dwarf = std.debug.Dwarf;
const Pdb = std.debug.Pdb;
const Error = std.debug.SelfInfoError;
const assert = std.debug.assert;
const coff = std.coff;
const fs = std.fs;
const windows = std.os.windows;

const builtin = @import("builtin");
const native_endian = builtin.target.cpu.arch.endian();

const SelfInfo = @This();