Commit e17a050bc6
Changed files (5)
src/libs/freebsd.zig
@@ -977,10 +977,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
-pub fn sharedObjectsCount() u8 {
- return libs.len;
-}
-
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.freebsd_so_files == null);
comp.freebsd_so_files = so_files;
src/libs/glibc.zig
@@ -1130,18 +1130,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
-pub fn sharedObjectsCount(target: *const std.Target) u8 {
- const target_version = target.os.versionRange().gnuLibCVersion() orelse return 0;
- var count: u8 = 0;
- for (libs) |lib| {
- if (lib.removed_in) |rem_in| {
- if (target_version.order(rem_in) != .lt) continue;
- }
- count += 1;
- }
- return count;
-}
-
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
const target_version = comp.getTarget().os.versionRange().gnuLibCVersion().?;
src/libs/netbsd.zig
@@ -642,10 +642,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
-pub fn sharedObjectsCount() u8 {
- return libs.len;
-}
-
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.netbsd_so_files == null);
comp.netbsd_so_files = so_files;
src/link/Queue.zig
@@ -16,9 +16,9 @@ mutex: std.Thread.Mutex,
/// Validates that only one `flushTaskQueue` thread is running at a time.
flush_safety: std.debug.SafetyLock,
-/// This is the number of prelink tasks which are expected but have not yet been enqueued.
-/// Guarded by `mutex`.
-pending_prelink_tasks: u32,
+/// This value is positive while there are still prelink tasks yet to be queued. Once they are
+/// all queued, this value becomes 0, and ZCU tasks can be run. Guarded by `mutex`.
+prelink_wait_count: u32,
/// Prelink tasks which have been enqueued and are not yet owned by the worker thread.
/// Allocated into `gpa`, guarded by `mutex`.
@@ -59,7 +59,7 @@ state: union(enum) {
/// The link thread is currently running or queued to run.
running,
/// The link thread is not running or queued, because it has exhausted all immediately available
- /// tasks. It should be spawned when more tasks are enqueued. If `pending_prelink_tasks` is not
+ /// tasks. It should be spawned when more tasks are enqueued. If `prelink_wait_count` is not
/// zero, we are specifically waiting for prelink tasks.
finished,
/// The link thread is not running or queued, because it is waiting for this MIR to be populated.
@@ -73,11 +73,11 @@ state: union(enum) {
const max_air_bytes_in_flight = 10 * 1024 * 1024;
/// The initial `Queue` state, containing no tasks, expecting no prelink tasks, and with no running worker thread.
-/// The `pending_prelink_tasks` and `queued_prelink` fields may be modified as needed before calling `start`.
+/// The `queued_prelink` field may be appended to before calling `start`.
pub const empty: Queue = .{
.mutex = .{},
.flush_safety = .{},
- .pending_prelink_tasks = 0,
+ .prelink_wait_count = undefined, // set in `start`
.queued_prelink = .empty,
.wip_prelink = .empty,
.queued_zcu = .empty,
@@ -100,17 +100,49 @@ pub fn deinit(q: *Queue, comp: *Compilation) void {
}
/// This is expected to be called exactly once, after which the caller must not directly access
-/// `queued_prelink` or `pending_prelink_tasks` any longer. This will spawn the link thread if
-/// necessary.
+/// `queued_prelink` any longer. This will spawn the link thread if necessary.
pub fn start(q: *Queue, comp: *Compilation) void {
assert(q.state == .finished);
assert(q.queued_zcu.items.len == 0);
+ // Reset this to 1. We can't init it to 1 in `empty`, because it would fall to 0 on successive
+ // incremental updates, but we still need the initial 1.
+ q.prelink_wait_count = 1;
if (q.queued_prelink.items.len != 0) {
q.state = .running;
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
}
+/// Every call to this must be paired with a call to `finishPrelinkItem`.
+pub fn startPrelinkItem(q: *Queue) void {
+ q.mutex.lock();
+ defer q.mutex.unlock();
+ assert(q.prelink_wait_count > 0); // must not have finished everything already
+ q.prelink_wait_count += 1;
+}
+/// This function must be called exactly one more time than `startPrelinkItem` is. The final call
+/// indicates that we have finished calling `startPrelinkItem`, so once all pending items finish,
+/// we are ready to move on to ZCU tasks.
+pub fn finishPrelinkItem(q: *Queue, comp: *Compilation) void {
+ {
+ q.mutex.lock();
+ defer q.mutex.unlock();
+ q.prelink_wait_count -= 1;
+ if (q.prelink_wait_count != 0) return;
+ // The prelink wait count dropped to 0; restart the linker thread if necessary.
+ switch (q.state) {
+ .wait_for_mir => unreachable, // we've not started zcu tasks yet
+ .running => return,
+ .finished => {},
+ }
+ assert(q.queued_prelink.items.len == 0);
+ // Even if there are no ZCU tasks, we must restart the linker thread to make sure
+ // that `link.File.prelink()` is called.
+ q.state = .running;
+ }
+ comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
+}
+
/// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
/// thread was waiting for this MIR, it can resume.
pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir: *ZcuTask.LinkFunc.SharedMir) void {
@@ -130,14 +162,14 @@ pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
-/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that `tasks.len` is
-/// less than or equal to `q.pending_prelink_tasks`. Also asserts that `tasks.len` is not 0.
+/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that
+/// `prelink_wait_count` is not yet 0. Also asserts that `tasks.len` is not 0.
pub fn enqueuePrelink(q: *Queue, comp: *Compilation, tasks: []const PrelinkTask) Allocator.Error!void {
{
q.mutex.lock();
defer q.mutex.unlock();
+ assert(q.prelink_wait_count > 0);
try q.queued_prelink.appendSlice(comp.gpa, tasks);
- q.pending_prelink_tasks -= @intCast(tasks.len);
switch (q.state) {
.wait_for_mir => unreachable, // we've not started zcu tasks yet
.running => return,
@@ -167,7 +199,7 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
try q.queued_zcu.append(comp.gpa, task);
switch (q.state) {
.running, .wait_for_mir => return,
- .finished => if (q.pending_prelink_tasks != 0) return,
+ .finished => if (q.prelink_wait_count > 0) return,
}
// Restart the linker thread, unless it would immediately be blocked
if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
@@ -194,7 +226,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
defer q.mutex.unlock();
std.mem.swap(std.ArrayListUnmanaged(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
if (q.wip_prelink.items.len == 0) {
- if (q.pending_prelink_tasks == 0) {
+ if (q.prelink_wait_count == 0) {
break :prelink; // prelink is done
} else {
// We're expecting more prelink tasks so can't move on to ZCU tasks.
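The counter introduced here behaves like a wait group over prelink-producing work: `start` sets the count to 1, `startPrelinkItem` adds one per in-flight producer, and the extra final `finishPrelinkItem` releases that initial 1 once no further producers will be started. A minimal standalone sketch of the protocol, assuming hypothetical names (`WaitCount`, `startItem`, `finishItem`) that are not part of this commit:

const std = @import("std");

/// Illustrative stand-in for the role `prelink_wait_count` plays in Queue.zig.
const WaitCount = struct {
    mutex: std.Thread.Mutex = .{},
    count: u32 = 1, // the initial 1 is released only by the final `finishItem` call

    fn startItem(wc: *WaitCount) void {
        wc.mutex.lock();
        defer wc.mutex.unlock();
        std.debug.assert(wc.count > 0); // must not already have hit zero
        wc.count += 1;
    }

    /// Returns true exactly once: when the count drops to zero, meaning every
    /// started item has finished and no further items will be started.
    fn finishItem(wc: *WaitCount) bool {
        wc.mutex.lock();
        defer wc.mutex.unlock();
        wc.count -= 1;
        return wc.count == 0;
    }
};

test "one startItem/finishItem pair per producer, plus one final call" {
    var wc: WaitCount = .{};
    wc.startItem(); // one per producer spawned
    try std.testing.expect(!wc.finishItem()); // that producer finishes
    try std.testing.expect(wc.finishItem()); // the extra, final call reaches zero
}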
src/Compilation.zig
@@ -2384,7 +2384,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
}
- comp.link_task_queue.pending_prelink_tasks += @intCast(comp.c_object_table.count());
// Add a `Win32Resource` for each `rc_source_files` and one for `manifest_file`.
const win32_resource_count =
@@ -2392,10 +2391,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (win32_resource_count > 0) {
dev.check(.win32_resource);
try comp.win32_resource_table.ensureTotalCapacity(gpa, win32_resource_count);
- // Add this after adding logic to updateWin32Resource to pass the
- // result into link.loadInput. loadInput integration is not implemented
- // for Windows linking logic yet.
- //comp.link_task_queue.pending_prelink_tasks += @intCast(win32_resource_count);
for (options.rc_source_files) |rc_source_file| {
const win32_resource = try gpa.create(Win32Resource);
errdefer gpa.destroy(win32_resource);
@@ -2454,58 +2449,47 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (musl.needsCrt0(comp.config.output_mode, comp.config.link_mode, comp.config.pie)) |f| {
comp.queued_jobs.musl_crt_file[@intFromEnum(f)] = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
switch (comp.config.link_mode) {
.static => comp.queued_jobs.musl_crt_file[@intFromEnum(musl.CrtFile.libc_a)] = true,
.dynamic => comp.queued_jobs.musl_crt_file[@intFromEnum(musl.CrtFile.libc_so)] = true,
}
- comp.link_task_queue.pending_prelink_tasks += 1;
} else if (target.isGnuLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
if (glibc.needsCrt0(comp.config.output_mode)) |f| {
comp.queued_jobs.glibc_crt_file[@intFromEnum(f)] = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.glibc_shared_objects = true;
- comp.link_task_queue.pending_prelink_tasks += glibc.sharedObjectsCount(target);
comp.queued_jobs.glibc_crt_file[@intFromEnum(glibc.CrtFile.libc_nonshared_a)] = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
} else if (target.isFreeBSDLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
if (freebsd.needsCrt0(comp.config.output_mode)) |f| {
comp.queued_jobs.freebsd_crt_file[@intFromEnum(f)] = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.freebsd_shared_objects = true;
- comp.link_task_queue.pending_prelink_tasks += freebsd.sharedObjectsCount();
} else if (target.isNetBSDLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
if (netbsd.needsCrt0(comp.config.output_mode)) |f| {
comp.queued_jobs.netbsd_crt_file[@intFromEnum(f)] = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.netbsd_shared_objects = true;
- comp.link_task_queue.pending_prelink_tasks += netbsd.sharedObjectsCount();
} else if (target.isWasiLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(wasi_libc.execModelCrtFile(comp.config.wasi_exec_model))] = true;
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(wasi_libc.CrtFile.libc_a)] = true;
- comp.link_task_queue.pending_prelink_tasks += 2;
} else if (target.isMinGW()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
const main_crt_file: mingw.CrtFile = if (is_dyn_lib) .dllcrt2_o else .crt2_o;
comp.queued_jobs.mingw_crt_file[@intFromEnum(main_crt_file)] = true;
comp.queued_jobs.mingw_crt_file[@intFromEnum(mingw.CrtFile.libmingw32_lib)] = true;
- comp.link_task_queue.pending_prelink_tasks += 2;
// When linking mingw-w64 there are some import libs we always need.
try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
@@ -2519,7 +2503,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
target.isMinGW())
{
comp.queued_jobs.zigc_lib = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
}
@@ -2536,50 +2519,41 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
if (comp.wantBuildLibUnwindFromSource()) {
comp.queued_jobs.libunwind = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) {
comp.queued_jobs.libcxx = true;
comp.queued_jobs.libcxxabi = true;
- comp.link_task_queue.pending_prelink_tasks += 2;
}
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.any_sanitize_thread) {
comp.queued_jobs.libtsan = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
if (can_build_compiler_rt) {
if (comp.compiler_rt_strat == .lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
comp.queued_jobs.compiler_rt_lib = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.compiler_rt_strat == .obj) {
log.debug("queuing a job to build compiler_rt_obj", .{});
// In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
comp.queued_jobs.compiler_rt_obj = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.compiler_rt_strat == .dyn_lib) {
// hack for stage2_x86_64 + coff
log.debug("queuing a job to build compiler_rt_dyn_lib", .{});
comp.queued_jobs.compiler_rt_dyn_lib = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
if (comp.ubsan_rt_strat == .lib) {
log.debug("queuing a job to build ubsan_rt_lib", .{});
comp.queued_jobs.ubsan_rt_lib = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.ubsan_rt_strat == .obj) {
log.debug("queuing a job to build ubsan_rt_obj", .{});
comp.queued_jobs.ubsan_rt_obj = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
if (is_exe_or_dyn_lib and comp.config.any_fuzz) {
log.debug("queuing a job to build libfuzzer", .{});
comp.queued_jobs.fuzzer_lib = true;
- comp.link_task_queue.pending_prelink_tasks += 1;
}
}
}
@@ -2587,8 +2561,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
try comp.link_task_queue.queued_prelink.append(gpa, .load_explicitly_provided);
}
log.debug("queued prelink tasks: {d}", .{comp.link_task_queue.queued_prelink.items.len});
- log.debug("pending prelink tasks: {d}", .{comp.link_task_queue.pending_prelink_tasks});
-
return comp;
}
@@ -4408,10 +4380,8 @@ fn performAllTheWork(
comp.link_task_wait_group.reset();
defer comp.link_task_wait_group.wait();
- comp.link_prog_node.increaseEstimatedTotalItems(
- comp.link_task_queue.queued_prelink.items.len + // already queued prelink tasks
- comp.link_task_queue.pending_prelink_tasks, // prelink tasks which will be queued
- );
+ // Already-queued prelink tasks
+ comp.link_prog_node.increaseEstimatedTotalItems(comp.link_task_queue.queued_prelink.items.len);
comp.link_task_queue.start(comp);
if (comp.emit_docs != null) {
@@ -4427,6 +4397,7 @@ fn performAllTheWork(
// compiler-rt due to LLD bugs as well, e.g.:
//
// https://github.com/llvm/llvm-project/issues/43698#issuecomment-2542660611
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
@@ -4444,6 +4415,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.compiler_rt_obj and comp.compiler_rt_obj == null) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
@@ -4462,6 +4434,7 @@ fn performAllTheWork(
// hack for stage2_x86_64 + coff
if (comp.queued_jobs.compiler_rt_dyn_lib and comp.compiler_rt_dyn_lib == null) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
@@ -4479,6 +4452,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.fuzzer_lib and comp.fuzzer_lib == null) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"fuzzer.zig",
@@ -4493,6 +4467,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.ubsan_rt_lib and comp.ubsan_rt_lib == null) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"ubsan_rt.zig",
@@ -4509,6 +4484,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.ubsan_rt_obj and comp.ubsan_rt_obj == null) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"ubsan_rt.zig",
@@ -4525,40 +4501,49 @@ fn performAllTheWork(
}
if (comp.queued_jobs.glibc_shared_objects) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildGlibcSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.freebsd_shared_objects) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildFreeBSDSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.netbsd_shared_objects) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildNetBSDSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libunwind) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibUnwind, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libcxx) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibCxx, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libcxxabi) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibCxxAbi, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libtsan) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibTsan, .{ comp, main_progress_node });
}
if (comp.queued_jobs.zigc_lib and comp.zigc_static_lib == null) {
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibZigC, .{ comp, main_progress_node });
}
for (0..@typeInfo(musl.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.musl_crt_file[i]) {
const tag: musl.CrtFile = @enumFromInt(i);
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildMuslCrtFile, .{ comp, tag, main_progress_node });
}
}
@@ -4566,6 +4551,7 @@ fn performAllTheWork(
for (0..@typeInfo(glibc.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.glibc_crt_file[i]) {
const tag: glibc.CrtFile = @enumFromInt(i);
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildGlibcCrtFile, .{ comp, tag, main_progress_node });
}
}
@@ -4573,6 +4559,7 @@ fn performAllTheWork(
for (0..@typeInfo(freebsd.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.freebsd_crt_file[i]) {
const tag: freebsd.CrtFile = @enumFromInt(i);
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildFreeBSDCrtFile, .{ comp, tag, main_progress_node });
}
}
@@ -4580,6 +4567,7 @@ fn performAllTheWork(
for (0..@typeInfo(netbsd.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.netbsd_crt_file[i]) {
const tag: netbsd.CrtFile = @enumFromInt(i);
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildNetBSDCrtFile, .{ comp, tag, main_progress_node });
}
}
@@ -4587,6 +4575,7 @@ fn performAllTheWork(
for (0..@typeInfo(wasi_libc.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.wasi_libc_crt_file[i]) {
const tag: wasi_libc.CrtFile = @enumFromInt(i);
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildWasiLibcCrtFile, .{ comp, tag, main_progress_node });
}
}
@@ -4594,6 +4583,7 @@ fn performAllTheWork(
for (0..@typeInfo(mingw.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.mingw_crt_file[i]) {
const tag: mingw.CrtFile = @enumFromInt(i);
+ comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildMingwCrtFile, .{ comp, tag, main_progress_node });
}
}
@@ -4665,12 +4655,14 @@ fn performAllTheWork(
}
while (comp.c_object_work_queue.readItem()) |c_object| {
+ comp.link_task_queue.startPrelinkItem();
comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
comp, c_object, main_progress_node,
});
}
while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
+ comp.link_task_queue.startPrelinkItem();
comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
comp, win32_resource, main_progress_node,
});
@@ -4773,15 +4765,14 @@ fn performAllTheWork(
}
};
+ // We aren't going to start any more prelink-producing jobs; make the final call.
+ comp.link_task_queue.finishPrelinkItem(comp);
+
if (!comp.separateCodegenThreadOk()) {
// Waits until all input files have been parsed.
comp.link_task_wait_group.wait();
comp.link_task_wait_group.reset();
std.log.scoped(.link).debug("finished waiting for link_task_wait_group", .{});
- if (comp.link_task_queue.pending_prelink_tasks > 0) {
- // Indicates an error occurred preventing prelink phase from completing.
- return;
- }
}
if (comp.zcu != null) {
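Every spawn site in this function follows the same pairing: `startPrelinkItem` on the manager thread immediately before the worker is spawned, and a `defer ... finishPrelinkItem(comp)` at the top of the worker (see the hunks below) so the count is released on every exit path, with the manager's own `finishPrelinkItem` above acting as the final call. A hedged, self-contained sketch of that shape, using plain `std.Thread` and stand-in names (`startItem`, `finishItem`, `producer`) rather than the commit's functions:

const std = @import("std");

var wait_count: u32 = 1; // the initial 1 belongs to the manager thread
var mutex: std.Thread.Mutex = .{};

fn startItem() void {
    mutex.lock();
    defer mutex.unlock();
    wait_count += 1;
}

fn finishItem() void {
    mutex.lock();
    defer mutex.unlock();
    wait_count -= 1;
    if (wait_count == 0) {
        // In the real code, reaching zero is where the linker thread is
        // restarted so `link.File.prelink()` runs even with no ZCU tasks.
    }
}

fn producer() void {
    // Mirrors `defer comp.link_task_queue.finishPrelinkItem(comp);` in the
    // worker functions: released even on early error returns.
    defer finishItem();
    // ... build a CRT file or shared objects, then enqueue prelink tasks ...
}

pub fn main() !void {
    startItem(); // immediately before spawning, as at each spawn site
    const t = try std.Thread.spawn(.{}, producer, .{});
    t.join();
    finishItem(); // the manager's final call: no further producers will start
}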
@@ -5568,6 +5559,7 @@ fn workerUpdateCObject(
c_object: *CObject,
progress_node: std.Progress.Node,
) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
comp.updateCObject(c_object, progress_node) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
@@ -5585,6 +5577,7 @@ fn workerUpdateWin32Resource(
win32_resource: *Win32Resource,
progress_node: std.Progress.Node,
) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
comp.updateWin32Resource(win32_resource, progress_node) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
@@ -5628,6 +5621,7 @@ fn buildRt(
options: RtOptions,
out: *?CrtFile,
) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
comp.buildOutputFromZig(
root_source_name,
root_name,
@@ -5646,6 +5640,7 @@ fn buildRt(
}
fn buildMuslCrtFile(comp: *Compilation, crt_file: musl.CrtFile, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (musl.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.musl_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5657,6 +5652,7 @@ fn buildMuslCrtFile(comp: *Compilation, crt_file: musl.CrtFile, prog_node: std.P
}
fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.glibc_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5668,6 +5664,7 @@ fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std
}
fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.glibc_shared_objects = false;
@@ -5680,6 +5677,7 @@ fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) voi
}
fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (freebsd.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.freebsd_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5691,6 +5689,7 @@ fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node:
}
fn buildFreeBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (freebsd.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.freebsd_shared_objects = false;
@@ -5703,6 +5702,7 @@ fn buildFreeBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) v
}
fn buildNetBSDCrtFile(comp: *Compilation, crt_file: netbsd.CrtFile, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (netbsd.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.netbsd_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5714,6 +5714,7 @@ fn buildNetBSDCrtFile(comp: *Compilation, crt_file: netbsd.CrtFile, prog_node: s
}
fn buildNetBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (netbsd.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.netbsd_shared_objects = false;
@@ -5726,6 +5727,7 @@ fn buildNetBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) vo
}
fn buildMingwCrtFile(comp: *Compilation, crt_file: mingw.CrtFile, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (mingw.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.mingw_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5737,6 +5739,7 @@ fn buildMingwCrtFile(comp: *Compilation, crt_file: mingw.CrtFile, prog_node: std
}
fn buildWasiLibcCrtFile(comp: *Compilation, crt_file: wasi_libc.CrtFile, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (wasi_libc.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@@ -5748,6 +5751,7 @@ fn buildWasiLibcCrtFile(comp: *Compilation, crt_file: wasi_libc.CrtFile, prog_no
}
fn buildLibUnwind(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (libunwind.buildStaticLib(comp, prog_node)) |_| {
comp.queued_jobs.libunwind = false;
} else |err| switch (err) {
@@ -5757,6 +5761,7 @@ fn buildLibUnwind(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (libcxx.buildLibCxx(comp, prog_node)) |_| {
comp.queued_jobs.libcxx = false;
} else |err| switch (err) {
@@ -5766,6 +5771,7 @@ fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (libcxx.buildLibCxxAbi(comp, prog_node)) |_| {
comp.queued_jobs.libcxxabi = false;
} else |err| switch (err) {
@@ -5775,6 +5781,7 @@ fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibTsan(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
if (libtsan.buildTsan(comp, prog_node)) |_| {
comp.queued_jobs.libtsan = false;
} else |err| switch (err) {
@@ -5784,6 +5791,7 @@ fn buildLibTsan(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibZigC(comp: *Compilation, prog_node: std.Progress.Node) void {
+ defer comp.link_task_queue.finishPrelinkItem(comp);
comp.buildOutputFromZig(
"c.zig",
"zigc",
@@ -7721,6 +7729,7 @@ pub fn queuePrelinkTaskMode(comp: *Compilation, path: Cache.Path, config: *const
/// Only valid to call during `update`. Automatically handles queuing up a
/// linker worker task if there is not already one.
pub fn queuePrelinkTasks(comp: *Compilation, tasks: []const link.PrelinkTask) void {
+ comp.link_prog_node.increaseEstimatedTotalItems(tasks.len);
comp.link_task_queue.enqueuePrelink(comp, tasks) catch |err| switch (err) {
error.OutOfMemory => return comp.setAllocFailure(),
};
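With `pending_prelink_tasks` gone, the progress estimate for prelink work also moves from an up-front guess in `performAllTheWork` to being grown here, at the moment tasks are actually enqueued. A small sketch of that accounting style with `std.Progress` (the node names and batch size are illustrative only, not taken from the commit):

const std = @import("std");

pub fn main() void {
    const root = std.Progress.start(.{ .root_name = "link" });
    defer root.end();

    // Start with only the work already known (the already-queued prelink tasks).
    const prelink = root.start("prelink", 0);
    defer prelink.end();

    // Later, as each batch of tasks is actually enqueued, grow the estimate
    // instead of having pre-counted it when the Compilation was created.
    const batch_len: usize = 3; // hypothetical batch size
    prelink.increaseEstimatedTotalItems(batch_len);
    for (0..batch_len) |_| prelink.completeOne();
}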