Commit b8d99a3323
Changed files (5)
lib
src
lib/fuzzer.zig
@@ -30,19 +30,6 @@ fn logOverride(
export threadlocal var __sancov_lowest_stack: usize = std.math.maxInt(usize);
-var module_count_8bc: usize = 0;
-var module_count_pcs: usize = 0;
-
-export fn __sanitizer_cov_8bit_counters_init(start: [*]u8, end: [*]u8) void {
- assert(@atomicRmw(usize, &module_count_8bc, .Add, 1, .monotonic) == 0);
- fuzzer.pc_counters = start[0 .. end - start];
-}
-
-export fn __sanitizer_cov_pcs_init(start: [*]const Fuzzer.FlaggedPc, end: [*]const Fuzzer.FlaggedPc) void {
- assert(@atomicRmw(usize, &module_count_pcs, .Add, 1, .monotonic) == 0);
- fuzzer.flagged_pcs = start[0 .. end - start];
-}
-
export fn __sanitizer_cov_trace_const_cmp1(arg1: u8, arg2: u8) void {
handleCmp(@returnAddress(), arg1, arg2);
}
@@ -105,7 +92,7 @@ const Fuzzer = struct {
gpa: Allocator,
rng: std.Random.DefaultPrng,
input: std.ArrayListUnmanaged(u8),
- flagged_pcs: []const FlaggedPc,
+ pcs: []const usize,
pc_counters: []u8,
n_runs: usize,
recent_cases: RunMap,
@@ -174,32 +161,18 @@ const Fuzzer = struct {
}
};
- const FlaggedPc = extern struct {
- addr: usize,
- flags: packed struct(usize) {
- entry: bool,
- _: @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } }),
- },
- };
-
const Analysis = struct {
score: usize,
id: Run.Id,
};
- fn init(f: *Fuzzer, cache_dir: std.fs.Dir) !void {
- const flagged_pcs = f.flagged_pcs;
-
+ fn init(f: *Fuzzer, cache_dir: std.fs.Dir, pc_counters: []u8, pcs: []const usize) !void {
f.cache_dir = cache_dir;
+ f.pc_counters = pc_counters;
+ f.pcs = pcs;
// Choose a file name for the coverage based on a hash of the PCs that will be stored within.
- const pc_digest = d: {
- var hasher = std.hash.Wyhash.init(0);
- for (flagged_pcs) |flagged_pc| {
- hasher.update(std.mem.asBytes(&flagged_pc.addr));
- }
- break :d hasher.final();
- };
+ const pc_digest = std.hash.Wyhash.hash(0, std.mem.sliceAsBytes(pcs));
f.coverage_id = pc_digest;
const hex_digest = std.fmt.hex(pc_digest);
const coverage_file_path = "v/" ++ hex_digest;
@@ -213,12 +186,12 @@ const Fuzzer = struct {
.truncate = false,
});
defer coverage_file.close();
- const n_bitset_elems = (flagged_pcs.len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
+ const n_bitset_elems = (pcs.len + @bitSizeOf(usize) - 1) / @bitSizeOf(usize);
comptime assert(SeenPcsHeader.trailing[0] == .pc_bits_usize);
comptime assert(SeenPcsHeader.trailing[1] == .pc_addr);
const bytes_len = @sizeOf(SeenPcsHeader) +
n_bitset_elems * @sizeOf(usize) +
- flagged_pcs.len * @sizeOf(usize);
+ pcs.len * @sizeOf(usize);
const existing_len = coverage_file.getEndPos() catch |err| {
fatal("unable to check len of coverage file: {s}", .{@errorName(err)});
};
@@ -233,12 +206,12 @@ const Fuzzer = struct {
fatal("unable to init coverage memory map: {s}", .{@errorName(err)});
};
if (existing_len != 0) {
- const existing_pcs_bytes = f.seen_pcs.items[@sizeOf(SeenPcsHeader) + @sizeOf(usize) * n_bitset_elems ..][0 .. flagged_pcs.len * @sizeOf(usize)];
+ const existing_pcs_bytes = f.seen_pcs.items[@sizeOf(SeenPcsHeader) + @sizeOf(usize) * n_bitset_elems ..][0 .. pcs.len * @sizeOf(usize)];
const existing_pcs = std.mem.bytesAsSlice(usize, existing_pcs_bytes);
- for (existing_pcs, flagged_pcs, 0..) |old, new, i| {
- if (old != new.addr) {
+ for (existing_pcs, pcs, 0..) |old, new, i| {
+ if (old != new) {
fatal("incompatible existing coverage file (differing PC at index {d}: {x} != {x})", .{
- i, old, new.addr,
+ i, old, new,
});
}
}
@@ -246,14 +219,12 @@ const Fuzzer = struct {
const header: SeenPcsHeader = .{
.n_runs = 0,
.unique_runs = 0,
- .pcs_len = flagged_pcs.len,
+ .pcs_len = pcs.len,
.lowest_stack = std.math.maxInt(usize),
};
f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&header));
f.seen_pcs.appendNTimesAssumeCapacity(0, n_bitset_elems * @sizeOf(usize));
- for (flagged_pcs) |flagged_pc| {
- f.seen_pcs.appendSliceAssumeCapacity(std.mem.asBytes(&flagged_pc.addr));
- }
+ f.seen_pcs.appendSliceAssumeCapacity(std.mem.sliceAsBytes(pcs));
}
}
@@ -307,8 +278,8 @@ const Fuzzer = struct {
// Track code coverage from all runs.
comptime assert(SeenPcsHeader.trailing[0] == .pc_bits_usize);
const header_end_ptr: [*]volatile usize = @ptrCast(f.seen_pcs.items[@sizeOf(SeenPcsHeader)..]);
- const remainder = f.flagged_pcs.len % @bitSizeOf(usize);
- const aligned_len = f.flagged_pcs.len - remainder;
+ const remainder = f.pcs.len % @bitSizeOf(usize);
+ const aligned_len = f.pcs.len - remainder;
const seen_pcs = header_end_ptr[0..aligned_len];
const pc_counters = std.mem.bytesAsSlice([@bitSizeOf(usize)]u8, f.pc_counters[0..aligned_len]);
const V = @Vector(@bitSizeOf(usize), u8);
@@ -433,7 +404,7 @@ var fuzzer: Fuzzer = .{
.gpa = general_purpose_allocator.allocator(),
.rng = std.Random.DefaultPrng.init(0),
.input = .{},
- .flagged_pcs = undefined,
+ .pcs = undefined,
.pc_counters = undefined,
.n_runs = 0,
.recent_cases = .{},
@@ -455,8 +426,32 @@ export fn fuzzer_next() Fuzzer.Slice {
}
export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void {
- if (module_count_8bc == 0) fatal("__sanitizer_cov_8bit_counters_init was never called", .{});
- if (module_count_pcs == 0) fatal("__sanitizer_cov_pcs_init was never called", .{});
+ // Linkers are expected to automatically add `__start_<section>` and
+ // `__stop_<section>` symbols when section names are valid C identifiers.
+
+ const pc_counters_start = @extern([*]u8, .{
+ .name = "__start___sancov_cntrs",
+ .linkage = .weak,
+ }) orelse fatal("missing __start___sancov_cntrs symbol", .{});
+
+ const pc_counters_end = @extern([*]u8, .{
+ .name = "__stop___sancov_cntrs",
+ .linkage = .weak,
+ }) orelse fatal("missing __stop___sancov_cntrs symbol", .{});
+
+ const pc_counters = pc_counters_start[0 .. pc_counters_end - pc_counters_start];
+
+ const pcs_start = @extern([*]usize, .{
+ .name = "__start___sancov_pcs1",
+ .linkage = .weak,
+ }) orelse fatal("missing __start___sancov_pcs1 symbol", .{});
+
+ const pcs_end = @extern([*]usize, .{
+ .name = "__stop___sancov_pcs1",
+ .linkage = .weak,
+ }) orelse fatal("missing __stop___sancov_pcs1 symbol", .{});
+
+ const pcs = pcs_start[0 .. pcs_end - pcs_start];
const cache_dir_path = cache_dir_struct.toZig();
const cache_dir = if (cache_dir_path.len == 0)
@@ -466,7 +461,8 @@ export fn fuzzer_init(cache_dir_struct: Fuzzer.Slice) void {
fatal("unable to open fuzz directory '{s}': {s}", .{ cache_dir_path, @errorName(err) });
};
- fuzzer.init(cache_dir) catch |err| fatal("unable to init fuzzer: {s}", .{@errorName(err)});
+ fuzzer.init(cache_dir, pc_counters, pcs) catch |err|
+ fatal("unable to init fuzzer: {s}", .{@errorName(err)});
}
/// Like `std.ArrayListUnmanaged(u8)` but backed by memory mapping.
src/codegen/llvm.zig
@@ -1275,7 +1275,7 @@ pub const Object = struct {
.is_small = options.is_small,
.time_report = options.time_report,
.tsan = options.sanitize_thread,
- .sancov = options.fuzz,
+ .sancov = sanCovPassEnabled(comp.config.san_cov_trace_pc_guard),
.lto = options.lto,
.asm_filename = null,
.bin_filename = options.bin_path,
@@ -1283,19 +1283,19 @@ pub const Object = struct {
.bitcode_filename = null,
.coverage = .{
.CoverageType = .Edge,
- .IndirectCalls = true,
+ .IndirectCalls = false,
.TraceBB = false,
- .TraceCmp = true,
+ .TraceCmp = false,
.TraceDiv = false,
.TraceGep = false,
.Use8bitCounters = false,
.TracePC = false,
.TracePCGuard = comp.config.san_cov_trace_pc_guard,
- .Inline8bitCounters = true,
+ .Inline8bitCounters = false,
.InlineBoolFlag = false,
- .PCTable = true,
+ .PCTable = false,
.NoPrune = false,
- .StackDepth = true,
+ .StackDepth = false,
.TraceLoads = false,
.TraceStores = false,
.CollectControlFlow = false,
@@ -1655,6 +1655,25 @@ pub const Object = struct {
break :debug_info .{ file, subprogram };
} else .{.none} ** 2;
+ const fuzz: ?FuncGen.Fuzz = f: {
+ if (!owner_mod.fuzz) break :f null;
+ if (func_analysis.disable_instrumentation) break :f null;
+ if (is_naked) break :f null;
+
+ // The void type used here is a placeholder to be replaced with an
+ // array of the appropriate size after the POI count is known.
+
+ const counters_variable = try o.builder.addVariable(.empty, .void, .default);
+ counters_variable.setLinkage(.private, &o.builder);
+ counters_variable.setAlignment(comptime Builder.Alignment.fromByteUnits(1), &o.builder);
+ counters_variable.setSection(try o.builder.string("__sancov_cntrs"), &o.builder);
+
+ break :f .{
+ .counters_variable = counters_variable,
+ .pcs = .{},
+ };
+ };
+
var fg: FuncGen = .{
.gpa = gpa,
.air = air,
@@ -1662,6 +1681,7 @@ pub const Object = struct {
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .Naked,
+ .fuzz = fuzz,
.ret_ptr = ret_ptr,
.args = args.items,
.arg_index = 0,
@@ -1679,7 +1699,7 @@ pub const Object = struct {
defer fg.deinit();
deinit_wip = false;
- fg.genBody(air.getMainBody()) catch |err| switch (err) {
+ fg.genBody(air.getMainBody(), .poi) catch |err| switch (err) {
error.CodegenFail => {
try zcu.failed_codegen.put(zcu.gpa, func.owner_nav, ng.err_msg.?);
ng.err_msg = null;
@@ -1688,6 +1708,24 @@ pub const Object = struct {
else => |e| return e,
};
+ if (fg.fuzz) |*f| {
+ {
+ const array_llvm_ty = try o.builder.arrayType(f.pcs.items.len, .i8);
+ f.counters_variable.ptrConst(&o.builder).global.ptr(&o.builder).type = array_llvm_ty;
+ const zero_init = try o.builder.zeroInitConst(array_llvm_ty);
+ try f.counters_variable.setInitializer(zero_init, &o.builder);
+ }
+
+ const array_llvm_ty = try o.builder.arrayType(f.pcs.items.len, .ptr);
+ const init_val = try o.builder.arrayConst(array_llvm_ty, f.pcs.items);
+ const pcs_variable = try o.builder.addVariable(.empty, array_llvm_ty, .default);
+ pcs_variable.setLinkage(.private, &o.builder);
+ pcs_variable.setMutability(.constant, &o.builder);
+ pcs_variable.setAlignment(Type.usize.abiAlignment(zcu).toLlvm(), &o.builder);
+ pcs_variable.setSection(try o.builder.string("__sancov_pcs1"), &o.builder);
+ try pcs_variable.setInitializer(init_val, &o.builder);
+ }
+
try fg.wip.finish();
}
@@ -4729,6 +4767,7 @@ pub const FuncGen = struct {
liveness: Liveness,
wip: Builder.WipFunction,
is_naked: bool,
+ fuzz: ?Fuzz,
file: Builder.Metadata,
scope: Builder.Metadata,
@@ -4769,6 +4808,16 @@ pub const FuncGen = struct {
sync_scope: Builder.SyncScope,
+ const Fuzz = struct {
+ counters_variable: Builder.Variable.Index,
+ pcs: std.ArrayListUnmanaged(Builder.Constant),
+
+ fn deinit(f: *Fuzz, gpa: Allocator) void {
+ f.pcs.deinit(gpa);
+ f.* = undefined;
+ }
+ };
+
const BreakList = union {
list: std.MultiArrayList(struct {
bb: Builder.Function.Block.Index,
@@ -4778,9 +4827,11 @@ pub const FuncGen = struct {
};
fn deinit(self: *FuncGen) void {
+ const gpa = self.gpa;
+ if (self.fuzz) |*f| f.deinit(gpa);
self.wip.deinit();
- self.func_inst_table.deinit(self.gpa);
- self.blocks.deinit(self.gpa);
+ self.func_inst_table.deinit(gpa);
+ self.blocks.deinit(gpa);
}
fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) Error {
@@ -4836,11 +4887,33 @@ pub const FuncGen = struct {
return o.null_opt_usize;
}
- fn genBody(self: *FuncGen, body: []const Air.Inst.Index) Error!void {
+ fn genBody(self: *FuncGen, body: []const Air.Inst.Index, coverage_point: Air.CoveragePoint) Error!void {
const o = self.ng.object;
const zcu = o.pt.zcu;
const ip = &zcu.intern_pool;
const air_tags = self.air.instructions.items(.tag);
+ switch (coverage_point) {
+ .none => {},
+ .poi => if (self.fuzz) |*fuzz| {
+ const poi_index = fuzz.pcs.items.len;
+ const base_ptr = fuzz.counters_variable.toValue(&o.builder);
+ const ptr = if (poi_index == 0) base_ptr else try self.wip.gep(.inbounds, .i8, base_ptr, &.{
+ try o.builder.intValue(.i32, poi_index),
+ }, "");
+ const counter = try self.wip.load(.normal, .i8, ptr, .default, "");
+ const one = try o.builder.intValue(.i8, 1);
+ const counter_incremented = try self.wip.bin(.add, counter, one, "");
+ _ = try self.wip.store(.normal, counter_incremented, ptr, .default);
+
+ // LLVM does not allow blockaddress on the entry block.
+ const pc = if (self.wip.cursor.block == .entry)
+ self.wip.function.toConst(&o.builder)
+ else
+ try o.builder.blockAddrConst(self.wip.function, self.wip.cursor.block);
+ const gpa = self.gpa;
+ try fuzz.pcs.append(gpa, pc);
+ },
+ }
for (body, 0..) |inst, i| {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
@@ -4949,7 +5022,7 @@ pub const FuncGen = struct {
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
.bitcast => try self.airBitCast(inst),
- .int_from_bool => try self.airIntFromBool(inst),
+ .int_from_bool => try self.airIntFromBool(inst),
.block => try self.airBlock(inst),
.br => try self.airBr(inst),
.switch_br => try self.airSwitchBr(inst),
@@ -4966,7 +5039,7 @@ pub const FuncGen = struct {
.trunc => try self.airTrunc(inst),
.fptrunc => try self.airFptrunc(inst),
.fpext => try self.airFpext(inst),
- .int_from_ptr => try self.airIntFromPtr(inst),
+ .int_from_ptr => try self.airIntFromPtr(inst),
.load => try self.airLoad(body[i..]),
.loop => try self.airLoop(inst),
.not => try self.airNot(inst),
@@ -5089,8 +5162,13 @@ pub const FuncGen = struct {
}
}
- fn genBodyDebugScope(self: *FuncGen, maybe_inline_func: ?InternPool.Index, body: []const Air.Inst.Index) Error!void {
- if (self.wip.strip) return self.genBody(body);
+ fn genBodyDebugScope(
+ self: *FuncGen,
+ maybe_inline_func: ?InternPool.Index,
+ body: []const Air.Inst.Index,
+ coverage_point: Air.CoveragePoint,
+ ) Error!void {
+ if (self.wip.strip) return self.genBody(body, coverage_point);
const old_file = self.file;
const old_inlined = self.inlined;
@@ -5137,7 +5215,8 @@ pub const FuncGen = struct {
.sp_flags = .{
.Optimized = mod.optimize_mode != .Debug,
.Definition = true,
- .LocalToUnit = true, // TODO: we can't know this at this point, since the function could be exported later!
+ // TODO: we can't know this at this point, since the function could be exported later!
+ .LocalToUnit = true,
},
},
o.debug_compile_unit,
@@ -5171,7 +5250,7 @@ pub const FuncGen = struct {
.no_location => {},
};
- try self.genBody(body);
+ try self.genBody(body, coverage_point);
}
pub const CallAttr = enum {
@@ -5881,7 +5960,7 @@ pub const FuncGen = struct {
const inst_ty = self.typeOfIndex(inst);
if (inst_ty.isNoReturn(zcu)) {
- try self.genBodyDebugScope(maybe_inline_func, body);
+ try self.genBodyDebugScope(maybe_inline_func, body, .none);
return .none;
}
@@ -5897,7 +5976,7 @@ pub const FuncGen = struct {
});
defer assert(self.blocks.remove(inst));
- try self.genBodyDebugScope(maybe_inline_func, body);
+ try self.genBodyDebugScope(maybe_inline_func, body, .none);
self.wip.cursor = .{ .block = parent_bb };
@@ -5996,11 +6075,11 @@ pub const FuncGen = struct {
self.wip.cursor = .{ .block = then_block };
if (hint == .then_cold) _ = try self.wip.callIntrinsicAssumeCold();
- try self.genBodyDebugScope(null, then_body);
+ try self.genBodyDebugScope(null, then_body, extra.data.branch_hints.then_cov);
self.wip.cursor = .{ .block = else_block };
if (hint == .else_cold) _ = try self.wip.callIntrinsicAssumeCold();
- try self.genBodyDebugScope(null, else_body);
+ try self.genBodyDebugScope(null, else_body, extra.data.branch_hints.else_cov);
// No need to reset the insert cursor since this instruction is noreturn.
return .none;
@@ -6085,7 +6164,7 @@ pub const FuncGen = struct {
fg.wip.cursor = .{ .block = return_block };
if (err_cold) _ = try fg.wip.callIntrinsicAssumeCold();
- try fg.genBodyDebugScope(null, body);
+ try fg.genBodyDebugScope(null, body, .poi);
fg.wip.cursor = .{ .block = continue_block };
}
@@ -6196,14 +6275,14 @@ pub const FuncGen = struct {
}
self.wip.cursor = .{ .block = case_block };
if (switch_br.getHint(case.idx) == .cold) _ = try self.wip.callIntrinsicAssumeCold();
- try self.genBodyDebugScope(null, case.body);
+ try self.genBodyDebugScope(null, case.body, .poi);
}
const else_body = it.elseBody();
self.wip.cursor = .{ .block = else_block };
if (switch_br.getElseHint() == .cold) _ = try self.wip.callIntrinsicAssumeCold();
if (else_body.len != 0) {
- try self.genBodyDebugScope(null, else_body);
+ try self.genBodyDebugScope(null, else_body, .poi);
} else {
_ = try self.wip.@"unreachable"();
}
@@ -6222,7 +6301,7 @@ pub const FuncGen = struct {
_ = try self.wip.br(loop_block);
self.wip.cursor = .{ .block = loop_block };
- try self.genBodyDebugScope(null, body);
+ try self.genBodyDebugScope(null, body, .none);
// TODO instead of this logic, change AIR to have the property that
// every block is guaranteed to end with a noreturn instruction.
@@ -12194,3 +12273,7 @@ pub fn initializeLLVMTarget(arch: std.Target.Cpu.Arch) void {
=> unreachable,
}
}
+
+fn sanCovPassEnabled(trace_pc_guard: bool) bool {
+ return trace_pc_guard;
+}
src/Air.zig
@@ -1126,7 +1126,9 @@ pub const CondBr = struct {
pub const BranchHints = packed struct(u32) {
true: std.builtin.BranchHint,
false: std.builtin.BranchHint,
- _: u26 = 0,
+ then_cov: CoveragePoint,
+ else_cov: CoveragePoint,
+ _: u24 = 0,
};
};
@@ -1903,3 +1905,12 @@ pub fn unwrapSwitch(air: *const Air, switch_inst: Inst.Index) UnwrappedSwitch {
pub const typesFullyResolved = types_resolved.typesFullyResolved;
pub const typeFullyResolved = types_resolved.checkType;
pub const valFullyResolved = types_resolved.checkVal;
+
+pub const CoveragePoint = enum(u1) {
+ /// Indicates the block is not a place of interest corresponding to
+ /// a source location for coverage purposes.
+ none,
+ /// Point of interest. The next instruction emitted corresponds to
+ /// a source location used for coverage instrumentation.
+ poi,
+};
src/print_air.zig
@@ -795,6 +795,9 @@ const Writer = struct {
if (extra.data.branch_hints.true != .none) {
try s.print(" {s}", .{@tagName(extra.data.branch_hints.true)});
}
+ if (extra.data.branch_hints.then_cov != .none) {
+ try s.print(" {s}", .{@tagName(extra.data.branch_hints.then_cov)});
+ }
try s.writeAll(" {\n");
const old_indent = w.indent;
w.indent += 2;
@@ -814,6 +817,9 @@ const Writer = struct {
if (extra.data.branch_hints.false != .none) {
try s.print(" {s}", .{@tagName(extra.data.branch_hints.false)});
}
+ if (extra.data.branch_hints.else_cov != .none) {
+ try s.print(" {s}", .{@tagName(extra.data.branch_hints.else_cov)});
+ }
try s.writeAll(" {\n");
if (liveness_condbr.else_deaths.len != 0) {
src/Sema.zig
@@ -6898,8 +6898,14 @@ fn popErrorReturnTrace(
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(then_block.instructions.items.len),
.else_body_len = @intCast(else_block.instructions.items.len),
- // weight against error branch
- .branch_hints = .{ .true = .likely, .false = .unlikely },
+ .branch_hints = .{
+ // Weight against error branch.
+ .true = .likely,
+ .false = .unlikely,
+ // Code coverage is not valuable on either branch.
+ .then_cov = .none,
+ .else_cov = .none,
+ },
}),
},
},
@@ -11796,14 +11802,22 @@ fn zirSwitchBlockErrUnion(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Comp
_ = try child_block.addInst(.{
.tag = .cond_br,
- .data = .{ .pl_op = .{
- .operand = cond,
- .payload = sema.addExtraAssumeCapacity(Air.CondBr{
- .then_body_len = @intCast(true_instructions.len),
- .else_body_len = @intCast(sub_block.instructions.items.len),
- .branch_hints = .{ .true = non_error_hint, .false = .none },
- }),
- } },
+ .data = .{
+ .pl_op = .{
+ .operand = cond,
+ .payload = sema.addExtraAssumeCapacity(Air.CondBr{
+ .then_body_len = @intCast(true_instructions.len),
+ .else_body_len = @intCast(sub_block.instructions.items.len),
+ .branch_hints = .{
+ .true = non_error_hint,
+ .false = .none,
+ // Code coverage is desired for error handling.
+ .then_cov = .poi,
+ .else_cov = .poi,
+ },
+ }),
+ },
+ },
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
@@ -12853,7 +12867,13 @@ fn analyzeSwitchRuntimeBlock(
sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(prev_then_body.len),
.else_body_len = @intCast(cond_body.len),
- .branch_hints = .{ .true = prev_hint, .false = .none },
+ .branch_hints = .{
+ .true = prev_hint,
+ .false = .none,
+ // Code coverage is desired for all switch prongs.
+ .then_cov = .poi,
+ .else_cov = .poi,
+ },
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(cond_body));
@@ -13133,7 +13153,12 @@ fn analyzeSwitchRuntimeBlock(
sema.air_instructions.items(.data)[@intFromEnum(prev_cond_br)].pl_op.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(prev_then_body.len),
.else_body_len = @intCast(case_block.instructions.items.len),
- .branch_hints = .{ .true = prev_hint, .false = else_hint },
+ .branch_hints = .{
+ .true = prev_hint,
+ .false = else_hint,
+ .then_cov = .poi,
+ .else_cov = .poi,
+ },
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(prev_then_body));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
@@ -19250,7 +19275,17 @@ fn zirBoolBr(
&else_block,
lhs,
block_inst,
- if (is_bool_or) .{ .true = .none, .false = rhs_hint } else .{ .true = rhs_hint, .false = .none },
+ if (is_bool_or) .{
+ .true = .none,
+ .false = rhs_hint,
+ .then_cov = .poi,
+ .else_cov = .poi,
+ } else .{
+ .true = rhs_hint,
+ .false = .none,
+ .then_cov = .poi,
+ .else_cov = .poi,
+ },
);
if (!rhs_noret) {
if (try sema.resolveDefinedValue(rhs_block, rhs_src, coerced_rhs_result)) |rhs_val| {
@@ -19467,14 +19502,22 @@ fn zirCondbr(
true_instructions.len + sub_block.instructions.items.len);
_ = try parent_block.addInst(.{
.tag = .cond_br,
- .data = .{ .pl_op = .{
- .operand = cond,
- .payload = sema.addExtraAssumeCapacity(Air.CondBr{
- .then_body_len = @intCast(true_instructions.len),
- .else_body_len = @intCast(sub_block.instructions.items.len),
- .branch_hints = .{ .true = true_hint, .false = false_hint },
- }),
- } },
+ .data = .{
+ .pl_op = .{
+ .operand = cond,
+ .payload = sema.addExtraAssumeCapacity(Air.CondBr{
+ .then_body_len = @intCast(true_instructions.len),
+ .else_body_len = @intCast(sub_block.instructions.items.len),
+ .branch_hints = .{
+ .true = true_hint,
+ .false = false_hint,
+ // Code coverage is desired for both branches.
+ .then_cov = .poi,
+ .else_cov = .poi,
+ },
+ }),
+ },
+ },
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(true_instructions));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(sub_block.instructions.items));
@@ -19851,8 +19894,14 @@ fn retWithErrTracing(
const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = @intCast(then_block.instructions.items.len),
.else_body_len = @intCast(else_block.instructions.items.len),
- // weight against error branch
- .branch_hints = .{ .true = .likely, .false = .unlikely },
+ .branch_hints = .{
+ // Weight against error branch.
+ .true = .likely,
+ .false = .unlikely,
+ // Code coverage is not valuable on either branch.
+ .then_cov = .none,
+ .else_cov = .none,
+ },
});
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(then_block.instructions.items));
sema.air_extra.appendSliceAssumeCapacity(@ptrCast(else_block.instructions.items));
@@ -27473,8 +27522,14 @@ fn addSafetyCheckExtra(
.payload = sema.addExtraAssumeCapacity(Air.CondBr{
.then_body_len = 1,
.else_body_len = @intCast(fail_block.instructions.items.len),
- // safety check failure branch is cold
- .branch_hints = .{ .true = .likely, .false = .cold },
+ .branch_hints = .{
+ // Safety check failure branch is cold.
+ .true = .likely,
+ .false = .cold,
+ // Code coverage not wanted for panic branches.
+ .then_cov = .none,
+ .else_cov = .none,
+ },
}),
},
},