const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const Hash = std.hash.Wyhash;
const Dwarf = std.debug.Dwarf;
const assert = std.debug.assert;

const Coverage = @This();

/// Provides a globally-scoped integer index for directories.
///
/// As opposed to, for example, a directory index that is compilation-unit
/// scoped inside a single ELF module.
///
/// String memory references the memory-mapped debug information.
///
/// Protected by `mutex`.
directories: std.ArrayHashMapUnmanaged(String, void, String.MapContext, false),
/// Provides a globally-scoped integer index for files.
///
/// String memory references the memory-mapped debug information.
///
/// Protected by `mutex`.
files: std.ArrayHashMapUnmanaged(File, void, File.MapContext, false),
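/// Backing storage for `String`; each interned string is null-terminated.
///
/// Protected by `mutex`.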
string_bytes: std.ArrayList(u8),
/// Protects the other fields.
mutex: std.Thread.Mutex,

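/// An empty `Coverage`. Owns no memory, so it is safe to pass to `deinit`
/// even if nothing was ever added.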
pub const init: Coverage = .{
    .directories = .{},
    .files = .{},
    .mutex = .{},
    .string_bytes = .{},
};

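/// Byte offset into `string_bytes` of a null-terminated string.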
pub const String = enum(u32) {
    _,

    pub const MapContext = struct {
        string_bytes: []const u8,

        pub fn eql(self: @This(), a: String, b: String, b_index: usize) bool {
            _ = b_index;
            const a_slice = span(self.string_bytes[@intFromEnum(a)..]);
            const b_slice = span(self.string_bytes[@intFromEnum(b)..]);
            return std.mem.eql(u8, a_slice, b_slice);
        }

        pub fn hash(self: @This(), a: String) u32 {
            return @truncate(Hash.hash(0, span(self.string_bytes[@intFromEnum(a)..])));
        }
    };

    pub const SliceAdapter = struct {
        string_bytes: []const u8,

        pub fn eql(self: @This(), a_slice: []const u8, b: String, b_index: usize) bool {
            _ = b_index;
            const b_slice = span(self.string_bytes[@intFromEnum(b)..]);
            return std.mem.eql(u8, a_slice, b_slice);
        }
        pub fn hash(self: @This(), a: []const u8) u32 {
            _ = self;
            return @truncate(Hash.hash(0, a));
        }
    };
};

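/// A resolved file, line, and column for a single program counter value.
/// `invalid` is used when an address cannot be resolved.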
pub const SourceLocation = extern struct {
    file: File.Index,
    line: u32,
    column: u32,

    pub const invalid: SourceLocation = .{
        .file = .invalid,
        .line = 0,
        .column = 0,
    };
};

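/// A source file, identified by a globally-scoped directory index and a basename.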
pub const File = extern struct {
    directory_index: u32,
    basename: String,

    pub const Index = enum(u32) {
        invalid = std.math.maxInt(u32),
        _,
    };

    pub const MapContext = struct {
        string_bytes: []const u8,

        pub fn hash(self: MapContext, a: File) u32 {
            const a_basename = span(self.string_bytes[@intFromEnum(a.basename)..]);
            return @truncate(Hash.hash(a.directory_index, a_basename));
        }

        pub fn eql(self: MapContext, a: File, b: File, b_index: usize) bool {
            _ = b_index;
            if (a.directory_index != b.directory_index) return false;
            const a_basename = span(self.string_bytes[@intFromEnum(a.basename)..]);
            const b_basename = span(self.string_bytes[@intFromEnum(b.basename)..]);
            return std.mem.eql(u8, a_basename, b_basename);
        }
    };

    pub const SliceAdapter = struct {
        string_bytes: []const u8,

        pub const Entry = struct {
            directory_index: u32,
            basename: []const u8,
        };

        pub fn hash(self: @This(), a: Entry) u32 {
            _ = self;
            return @truncate(Hash.hash(a.directory_index, a.basename));
        }

        pub fn eql(self: @This(), a: Entry, b: File, b_index: usize) bool {
            _ = b_index;
            if (a.directory_index != b.directory_index) return false;
            const b_basename = span(self.string_bytes[@intFromEnum(b.basename)..]);
            return std.mem.eql(u8, a.basename, b_basename);
        }
    };
};

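/// Frees all memory owned by `cov`, which must not be used afterwards.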
pub fn deinit(cov: *Coverage, gpa: Allocator) void {
    cov.directories.deinit(gpa);
    cov.files.deinit(gpa);
    cov.string_bytes.deinit(gpa);
    cov.* = undefined;
}

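/// Returns a pointer into `files`; invalidated when new files are added.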
pub fn fileAt(cov: *Coverage, index: File.Index) *File {
    return &cov.files.keys()[@intFromEnum(index)];
}

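/// Returns the null-terminated string identified by `index`. The slice points
/// into `string_bytes` and is invalidated when more strings are added.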
pub fn stringAt(cov: *Coverage, index: String) [:0]const u8 {
    return span(cov.string_bytes.items[@intFromEnum(index)..]);
}

pub const ResolveAddressesDwarfError = Dwarf.ScanError;

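/// Maps each program counter in `sorted_pc_addrs` to a `SourceLocation`,
/// interning the referenced directory and file names into `cov`. Addresses
/// that fall outside the known ranges, or whose compile unit has missing or
/// invalid line information, are reported as `SourceLocation.invalid`.
///
/// Illustrative sketch only; `gpa`, `endian`, `pcs` (sorted ascending), and
/// `dwarf` are assumed to exist, with `dwarf` already scanned and
/// `populateRanges` already called:
///
///     var cov: Coverage = .init;
///     defer cov.deinit(gpa);
///     const locs = try gpa.alloc(SourceLocation, pcs.len);
///     defer gpa.free(locs);
///     try cov.resolveAddressesDwarf(gpa, endian, pcs, locs, &dwarf);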
pub fn resolveAddressesDwarf(
    cov: *Coverage,
    gpa: Allocator,
    endian: std.builtin.Endian,
    /// Asserts the addresses are in ascending order.
    sorted_pc_addrs: []const u64,
    /// Asserts its length equals length of `sorted_pc_addrs`.
    output: []SourceLocation,
    d: *Dwarf,
) ResolveAddressesDwarfError!void {
    assert(sorted_pc_addrs.len == output.len);
    assert(d.ranges.items.len != 0); // call `populateRanges` first.

    var range_i: usize = 0;
    var range: *std.debug.Dwarf.Range = &d.ranges.items[0];
    var line_table_i: usize = undefined;
    var prev_pc: u64 = 0;
    var prev_cu: ?*std.debug.Dwarf.CompileUnit = null;
    // Protects directories and files tables from other threads.
    cov.mutex.lock();
    defer cov.mutex.unlock();
    next_pc: for (sorted_pc_addrs, output) |pc, *out| {
        assert(pc >= prev_pc);
        prev_pc = pc;

        while (pc >= range.end) {
            range_i += 1;
            if (range_i >= d.ranges.items.len) {
                out.* = SourceLocation.invalid;
                continue :next_pc;
            }
            range = &d.ranges.items[range_i];
        }
        if (pc < range.start) {
            out.* = SourceLocation.invalid;
            continue :next_pc;
        }
        const cu = &d.compile_unit_list.items[range.compile_unit_index];
        if (cu != prev_cu) {
            prev_cu = cu;
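            // Lazily populate this compile unit's line table cache.
            // `populateSrcLocCache` does not touch `cov`, so the lock is
            // released while it runs.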
            if (cu.src_loc_cache == null) {
                cov.mutex.unlock();
                defer cov.mutex.lock();
                d.populateSrcLocCache(gpa, endian, cu) catch |err| switch (err) {
                    error.MissingDebugInfo, error.InvalidDebugInfo => {
                        out.* = SourceLocation.invalid;
                        continue :next_pc;
                    },
                    else => |e| return e,
                };
            }
            const slc = &cu.src_loc_cache.?;
            const table_addrs = slc.line_table.keys();
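            // Seed the cursor at the first line table entry with an address
            // above `pc`; later iterations only need to advance it forward.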
            line_table_i = std.sort.upperBound(u64, table_addrs, pc, struct {
                fn order(context: u64, item: u64) std.math.Order {
                    return std.math.order(context, item);
                }
            }.order);
        }
        const slc = &cu.src_loc_cache.?;
        const table_addrs = slc.line_table.keys();
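        // Advance past all entries at or below `pc`; the entry just before
        // the cursor is the one covering `pc`.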
        while (line_table_i < table_addrs.len and table_addrs[line_table_i] <= pc) line_table_i += 1;

        const entry = slc.line_table.values()[line_table_i - 1];
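        // DWARF versions before 5 use 1-based file indices.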
        const corrected_file_index = entry.file - @intFromBool(slc.version < 5);
        const file_entry = slc.files[corrected_file_index];
        const dir_path = slc.directories[file_entry.dir_index].path;
        try cov.string_bytes.ensureUnusedCapacity(gpa, dir_path.len + file_entry.path.len + 2);
        const dir_gop = try cov.directories.getOrPutContextAdapted(gpa, dir_path, String.SliceAdapter{
            .string_bytes = cov.string_bytes.items,
        }, String.MapContext{
            .string_bytes = cov.string_bytes.items,
        });
        if (!dir_gop.found_existing)
            dir_gop.key_ptr.* = addStringAssumeCapacity(cov, dir_path);
        const file_gop = try cov.files.getOrPutContextAdapted(gpa, File.SliceAdapter.Entry{
            .directory_index = @intCast(dir_gop.index),
            .basename = file_entry.path,
        }, File.SliceAdapter{
            .string_bytes = cov.string_bytes.items,
        }, File.MapContext{
            .string_bytes = cov.string_bytes.items,
        });
        if (!file_gop.found_existing) file_gop.key_ptr.* = .{
            .directory_index = @intCast(dir_gop.index),
            .basename = addStringAssumeCapacity(cov, file_entry.path),
        };
        out.* = .{
            .file = @enumFromInt(file_gop.index),
            .line = entry.line,
            .column = entry.column,
        };
    }
}

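/// Appends `s` plus a null terminator to `string_bytes` and returns its index.
/// Asserts enough capacity was reserved beforehand.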
pub fn addStringAssumeCapacity(cov: *Coverage, s: []const u8) String {
    const result: String = @enumFromInt(cov.string_bytes.items.len);
    cov.string_bytes.appendSliceAssumeCapacity(s);
    cov.string_bytes.appendAssumeCapacity(0);
    return result;
}

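/// Returns the null-terminated string at the start of `s`.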
fn span(s: []const u8) [:0]const u8 {
    return std.mem.sliceTo(@as([:0]const u8, @ptrCast(s)), 0);
}