Commit 5ca54036ca
Changed files (12)
src/link/Elf/relocatable.zig
@@ -2,26 +2,10 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path
const gpa = comp.gpa;
const diags = &comp.link_diags;
- for (comp.link_inputs) |link_input| switch (link_input) {
- .object => |obj| parseObjectStaticLibReportingFailure(elf_file, obj.path),
- .archive => |obj| parseArchiveStaticLibReportingFailure(elf_file, obj.path),
- .dso_exact => unreachable,
- .res => unreachable,
- .dso => unreachable,
- };
-
- for (comp.c_object_table.keys()) |key| {
- parseObjectStaticLibReportingFailure(elf_file, key.status.success.object_path);
- }
-
if (module_obj_path) |path| {
parseObjectStaticLibReportingFailure(elf_file, path);
}
- if (comp.include_compiler_rt) {
- parseObjectStaticLibReportingFailure(elf_file, comp.compiler_rt_obj.?.full_object_path);
- }
-
if (diags.hasErrors()) return error.FlushFailure;
// First, we flush the relocatable object file generated by our backends.
@@ -153,17 +137,6 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path
pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?Path) link.File.FlushError!void {
const diags = &comp.link_diags;
- for (comp.link_inputs) |link_input| {
- elf_file.parseInputReportingFailure(link_input);
- }
-
- // This is a set of object files emitted by clang in a single `build-exe` invocation.
- // For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
- // in this set.
- for (comp.c_object_table.keys()) |key| {
- elf_file.openParseObjectReportingFailure(key.status.success.object_path);
- }
-
if (module_obj_path) |path| elf_file.openParseObjectReportingFailure(path);
if (diags.hasErrors()) return error.FlushFailure;
@@ -223,14 +196,6 @@ fn parseObjectStaticLibReportingFailure(elf_file: *Elf, path: Path) void {
};
}
-fn parseArchiveStaticLibReportingFailure(elf_file: *Elf, path: Path) void {
- const diags = &elf_file.base.comp.link_diags;
- parseArchiveStaticLib(elf_file, path) catch |err| switch (err) {
- error.LinkFailure => return,
- else => |e| diags.addParseError(path, "parsing static library failed: {s}", .{@errorName(e)}),
- };
-}
-
fn parseObjectStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
const gpa = elf_file.base.comp.gpa;
const file_handles = &elf_file.file_handles;
@@ -253,27 +218,6 @@ fn parseObjectStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
try object.parseAr(path, elf_file);
}
-fn parseArchiveStaticLib(elf_file: *Elf, path: Path) Elf.ParseError!void {
- const gpa = elf_file.base.comp.gpa;
- const diags = &elf_file.base.comp.link_diags;
- const file_handles = &elf_file.file_handles;
-
- const handle = try path.root_dir.handle.openFile(path.sub_path, .{});
- const fh = try Elf.addFileHandle(gpa, file_handles, handle);
-
- var archive = try Archive.parse(gpa, diags, file_handles, path, fh);
- defer archive.deinit(gpa);
-
- for (archive.objects) |extracted| {
- const index: File.Index = @intCast(try elf_file.files.addOne(gpa));
- elf_file.files.set(index, .{ .object = extracted });
- const object = &elf_file.files.items(.data)[index].object;
- object.index = index;
- try object.parseAr(path, elf_file);
- try elf_file.objects.append(gpa, index);
- }
-}
-
fn claimUnresolved(elf_file: *Elf) void {
if (elf_file.zigObjectPtr()) |zig_object| {
zig_object.claimUnresolvedRelocatable(elf_file);
src/link/Elf.zig
@@ -35,8 +35,7 @@ ptr_width: PtrWidth,
llvm_object: ?LlvmObject.Ptr = null,
/// A list of all input files.
-/// Index of each input file also encodes the priority or precedence of one input file
-/// over another.
+/// The first index is a special "null file". Beyond that, order is not significant.
files: std.MultiArrayList(File.Entry) = .{},
/// Long-lived list of all file descriptors.
/// We store them globally rather than per actual File so that we can re-use
@@ -350,6 +349,9 @@ pub fn createEmpty(
return self;
}
+ // --verbose-link
+ if (comp.verbose_link) try self.dumpArgv(comp);
+
const is_obj = output_mode == .Obj;
const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
@@ -750,6 +752,22 @@ pub fn allocateChunk(self: *Elf, args: struct {
return res;
}
+pub fn loadInput(self: *Elf, input: link.Input) !void {
+ const gpa = self.base.comp.gpa;
+ const diags = &self.base.comp.link_diags;
+ const target = self.getTarget();
+ const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
+ const default_sym_version = self.default_sym_version;
+
+ switch (input) {
+ .res => unreachable,
+ .dso_exact => @panic("TODO"),
+ .object => |obj| try parseObject(self, obj),
+ .archive => |obj| try parseArchive(gpa, diags, &self.file_handles, &self.files, &self.first_eflags, target, debug_fmt_strip, default_sym_version, &self.objects, obj),
+ .dso => |dso| try parseDso(gpa, diags, dso, &self.shared_objects, &self.files, target),
+ }
+}
+
pub fn flush(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
const use_lld = build_options.have_llvm and self.base.comp.config.use_lld;
if (use_lld) {
@@ -775,8 +793,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
const sub_prog_node = prog_node.start("ELF Flush", 0);
defer sub_prog_node.end();
- const target = self.getTarget();
- const link_mode = comp.config.link_mode;
const directory = self.base.emit.root_dir; // Just an alias to make it shorter to type.
const module_obj_path: ?Path = if (self.base.zcu_object_sub_path) |path| .{
.root_dir = directory,
@@ -786,9 +802,6 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
path,
} else null;
- // --verbose-link
- if (comp.verbose_link) try self.dumpArgv(comp);
-
if (self.zigObjectPtr()) |zig_object| try zig_object.flush(self, tid);
switch (comp.config.output_mode) {
@@ -800,124 +813,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, tid: Zcu.PerThread.Id, prog_nod
.Exe => {},
}
- const csu = try comp.getCrtPaths(arena);
-
- // csu prelude
- if (csu.crt0) |path| openParseObjectReportingFailure(self, path);
- if (csu.crti) |path| openParseObjectReportingFailure(self, path);
- if (csu.crtbegin) |path| openParseObjectReportingFailure(self, path);
-
- // objects and archives
- for (comp.link_inputs) |link_input| switch (link_input) {
- .object, .archive => parseInputReportingFailure(self, link_input),
- .dso_exact => @panic("TODO"),
- .dso => continue, // handled below
- .res => unreachable,
- };
-
- // This is a set of object files emitted by clang in a single `build-exe` invocation.
- // For instance, the implicit `a.o` as compiled by `zig build-exe a.c` will end up
- // in this set.
- for (comp.c_object_table.keys()) |key| {
- openParseObjectReportingFailure(self, key.status.success.object_path);
- }
-
if (module_obj_path) |path| openParseObjectReportingFailure(self, path);
- if (comp.config.any_sanitize_thread)
- openParseArchiveReportingFailure(self, comp.tsan_lib.?.full_object_path);
-
- if (comp.config.any_fuzz)
- openParseArchiveReportingFailure(self, comp.fuzzer_lib.?.full_object_path);
-
- // libc
- if (!comp.skip_linker_dependencies and !comp.config.link_libc) {
- if (comp.libc_static_lib) |lib|
- openParseArchiveReportingFailure(self, lib.full_object_path);
- }
-
- // dynamic libraries
- for (comp.link_inputs) |link_input| switch (link_input) {
- .object, .archive, .dso_exact => continue, // handled above
- .dso => parseInputReportingFailure(self, link_input),
- .res => unreachable,
- };
-
- // libc++ dep
- if (comp.config.link_libcpp) {
- openParseArchiveReportingFailure(self, comp.libcxxabi_static_lib.?.full_object_path);
- openParseArchiveReportingFailure(self, comp.libcxx_static_lib.?.full_object_path);
- }
-
- // libunwind dep
- if (comp.config.link_libunwind) {
- openParseArchiveReportingFailure(self, comp.libunwind_static_lib.?.full_object_path);
- }
-
- // libc dep
- diags.flags.missing_libc = false;
- if (comp.config.link_libc) {
- if (comp.libc_installation) |lc| {
- const flags = target_util.libcFullLinkFlags(target);
-
- for (flags) |flag| {
- assert(mem.startsWith(u8, flag, "-l"));
- const lib_name = flag["-l".len..];
- const suffix = switch (comp.config.link_mode) {
- .static => target.staticLibSuffix(),
- .dynamic => target.dynamicLibSuffix(),
- };
- const lib_path = try std.fmt.allocPrint(arena, "{s}/lib{s}{s}", .{
- lc.crt_dir.?, lib_name, suffix,
- });
- const resolved_path = Path.initCwd(lib_path);
- switch (comp.config.link_mode) {
- .static => openParseArchiveReportingFailure(self, resolved_path),
- .dynamic => openParseDsoReportingFailure(self, resolved_path),
- }
- }
- } else if (target.isGnuLibC()) {
- for (glibc.libs) |lib| {
- if (lib.removed_in) |rem_in| {
- if (target.os.version_range.linux.glibc.order(rem_in) != .lt) continue;
- }
-
- const lib_path = Path.initCwd(try std.fmt.allocPrint(arena, "{s}{c}lib{s}.so.{d}", .{
- comp.glibc_so_files.?.dir_path, fs.path.sep, lib.name, lib.sover,
- }));
- openParseDsoReportingFailure(self, lib_path);
- }
- const crt_file_path = try comp.get_libc_crt_file(arena, "libc_nonshared.a");
- openParseArchiveReportingFailure(self, crt_file_path);
- } else if (target.isMusl()) {
- const path = try comp.get_libc_crt_file(arena, switch (link_mode) {
- .static => "libc.a",
- .dynamic => "libc.so",
- });
- switch (link_mode) {
- .static => openParseArchiveReportingFailure(self, path),
- .dynamic => openParseDsoReportingFailure(self, path),
- }
- } else {
- diags.flags.missing_libc = true;
- }
- }
-
- // Finally, as the last input objects we add compiler_rt and CSU postlude (if any).
-
- // compiler-rt. Since compiler_rt exports symbols like `memset`, it needs
- // to be after the shared libraries, so they are picked up from the shared
- // libraries, not libcompiler_rt.
- if (comp.compiler_rt_lib) |crt_file| {
- openParseArchiveReportingFailure(self, crt_file.full_object_path);
- } else if (comp.compiler_rt_obj) |crt_file| {
- openParseObjectReportingFailure(self, crt_file.full_object_path);
- }
-
- // csu postlude
- if (csu.crtend) |path| openParseObjectReportingFailure(self, path);
- if (csu.crtn) |path| openParseObjectReportingFailure(self, path);
-
if (diags.hasErrors()) return error.FlushFailure;
// If we haven't already, create a linker-generated input file comprising of
@@ -1087,7 +984,17 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
}
} else null;
- const csu = try comp.getCrtPaths(arena);
+ const crt_basenames = std.zig.LibCInstallation.CrtBasenames.get(.{
+ .target = target,
+ .link_libc = comp.config.link_libc,
+ .output_mode = comp.config.output_mode,
+ .link_mode = link_mode,
+ .pie = comp.config.pie,
+ });
+ const crt_paths: std.zig.LibCInstallation.CrtPaths = if (comp.libc_installation) |lci|
+ try lci.resolveCrtPaths(arena, crt_basenames, target)
+ else
+ .{};
const compiler_rt_path: ?[]const u8 = blk: {
if (comp.compiler_rt_lib) |x| break :blk try x.full_object_path.toString(arena);
if (comp.compiler_rt_obj) |x| break :blk try x.full_object_path.toString(arena);
@@ -1204,10 +1111,9 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
try argv.append("-s");
}
- // csu prelude
- if (csu.crt0) |path| try argv.append(try path.toString(arena));
- if (csu.crti) |path| try argv.append(try path.toString(arena));
- if (csu.crtbegin) |path| try argv.append(try path.toString(arena));
+ if (crt_paths.crt0) |path| try argv.append(try path.toString(arena));
+ if (crt_paths.crti) |path| try argv.append(try path.toString(arena));
+ if (crt_paths.crtbegin) |path| try argv.append(try path.toString(arena));
if (comp.config.link_libc) {
if (self.base.comp.libc_installation) |libc_installation| {
@@ -1339,9 +1245,8 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
try argv.append(p);
}
- // crt postlude
- if (csu.crtend) |path| try argv.append(try path.toString(arena));
- if (csu.crtn) |path| try argv.append(try path.toString(arena));
+ if (crt_paths.crtend) |path| try argv.append(try path.toString(arena));
+ if (crt_paths.crtn) |path| try argv.append(try path.toString(arena));
}
Compilation.dump_argv(argv.items);
@@ -1361,20 +1266,6 @@ pub const ParseError = error{
UnknownFileType,
} || fs.Dir.AccessError || fs.File.SeekError || fs.File.OpenError || fs.File.ReadError;
-pub fn parseInputReportingFailure(self: *Elf, input: link.Input) void {
- const gpa = self.base.comp.gpa;
- const diags = &self.base.comp.link_diags;
- const target = self.getTarget();
-
- switch (input) {
- .res => unreachable,
- .dso_exact => unreachable,
- .object => |obj| parseObjectReportingFailure(self, obj),
- .archive => |obj| parseArchiveReportingFailure(self, obj),
- .dso => |dso| parseDsoReportingFailure(gpa, diags, dso, &self.shared_objects, &self.files, target),
- }
-}
-
pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
const diags = &self.base.comp.link_diags;
const obj = link.openObject(path, false, false) catch |err| {
@@ -1385,7 +1276,7 @@ pub fn openParseObjectReportingFailure(self: *Elf, path: Path) void {
self.parseObjectReportingFailure(obj);
}
-pub fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
+fn parseObjectReportingFailure(self: *Elf, obj: link.Input.Object) void {
const diags = &self.base.comp.link_diags;
self.parseObject(obj) catch |err| switch (err) {
error.LinkFailure => return, // already reported
@@ -1423,33 +1314,6 @@ fn parseObject(self: *Elf, obj: link.Input.Object) ParseError!void {
try object.parse(gpa, diags, obj.path, handle, first_eflags, target, debug_fmt_strip, default_sym_version);
}
-pub fn openParseArchiveReportingFailure(self: *Elf, path: Path) void {
- const diags = &self.base.comp.link_diags;
- const obj = link.openObject(path, false, false) catch |err| {
- switch (diags.failParse(path, "failed to open archive {}: {s}", .{ path, @errorName(err) })) {
- error.LinkFailure => return,
- }
- };
- parseArchiveReportingFailure(self, obj);
-}
-
-pub fn parseArchiveReportingFailure(self: *Elf, obj: link.Input.Object) void {
- const gpa = self.base.comp.gpa;
- const diags = &self.base.comp.link_diags;
- const first_eflags = &self.first_eflags;
- const target = self.base.comp.root_mod.resolved_target.result;
- const debug_fmt_strip = self.base.comp.config.debug_format == .strip;
- const default_sym_version = self.default_sym_version;
- const file_handles = &self.file_handles;
- const files = &self.files;
- const objects = &self.objects;
-
- parseArchive(gpa, diags, file_handles, files, first_eflags, target, debug_fmt_strip, default_sym_version, objects, obj) catch |err| switch (err) {
- error.LinkFailure => return, // already reported
- else => |e| diags.addParseError(obj.path, "failed to parse archive: {s}", .{@errorName(e)}),
- };
-}
-
fn parseArchive(
gpa: Allocator,
diags: *Diags,
@@ -1480,38 +1344,6 @@ fn parseArchive(
}
}
-fn openParseDsoReportingFailure(self: *Elf, path: Path) void {
- const diags = &self.base.comp.link_diags;
- const target = self.getTarget();
- const dso = link.openDso(path, false, false, false) catch |err| {
- switch (diags.failParse(path, "failed to open shared object {}: {s}", .{ path, @errorName(err) })) {
- error.LinkFailure => return,
- }
- };
- const gpa = self.base.comp.gpa;
- parseDsoReportingFailure(gpa, diags, dso, &self.shared_objects, &self.files, target);
-}
-
-fn parseDsoReportingFailure(
- gpa: Allocator,
- diags: *Diags,
- dso: link.Input.Dso,
- shared_objects: *std.StringArrayHashMapUnmanaged(File.Index),
- files: *std.MultiArrayList(File.Entry),
- target: std.Target,
-) void {
- parseDso(gpa, diags, dso, shared_objects, files, target) catch |err| switch (err) {
- error.LinkFailure => return, // already reported
- error.BadMagic, error.UnexpectedEndOfFile => {
- var notes = diags.addErrorWithNotes(2) catch return diags.setAllocFailure();
- notes.addMsg("failed to parse shared object: {s}", .{@errorName(err)}) catch return diags.setAllocFailure();
- notes.addNote("while parsing {}", .{dso.path}) catch return diags.setAllocFailure();
- notes.addNote("{s}", .{@as([]const u8, "the file may be a GNU ld script, in which case it is not an ELF file but a text file referencing other libraries to link. In this case, avoid depending on the library, convince your system administrators to refrain from using this kind of file, or pass -fallow-so-scripts to force the compiler to check every shared library in case it is an ld script.")}) catch return diags.setAllocFailure();
- },
- else => |e| diags.addParseError(dso.path, "failed to parse shared object: {s}", .{@errorName(e)}),
- };
-}
-
fn parseDso(
gpa: Allocator,
diags: *Diags,
@@ -1524,7 +1356,6 @@ fn parseDso(
defer tracy.end();
const handle = dso.file;
- defer handle.close();
const stat = Stat.fromFs(try handle.stat());
var header = try SharedObject.parseHeader(gpa, diags, dso.path, handle, stat, target);
src/Compilation.zig
@@ -10,6 +10,7 @@ const Target = std.Target;
const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const ErrorBundle = std.zig.ErrorBundle;
+const Path = Cache.Path;
const Value = @import("Value.zig");
const Type = @import("Type.zig");
@@ -39,9 +40,9 @@ const Air = @import("Air.zig");
const Builtin = @import("Builtin.zig");
const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");
-pub const Directory = Cache.Directory;
-const Path = Cache.Path;
+const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;
+pub const Directory = Cache.Directory;
pub const Config = @import("Compilation/Config.zig");
/// General-purpose allocator. Used for both temporary and long-term storage.
@@ -108,6 +109,7 @@ win32_resource_table: if (dev.env.supports(.win32_resource)) std.AutoArrayHashMa
} = .{},
link_diags: link.Diags,
+link_task_queue: ThreadSafeQueue(link.File.Task) = .empty,
work_queues: [
len: {
@@ -263,6 +265,9 @@ emit_asm: ?EmitLoc,
emit_llvm_ir: ?EmitLoc,
emit_llvm_bc: ?EmitLoc,
+work_queue_wait_group: WaitGroup = .{},
+work_queue_progress_node: std.Progress.Node = .none,
+
llvm_opt_bisect_limit: c_int,
file_system_inputs: ?*std.ArrayListUnmanaged(u8),
@@ -358,9 +363,6 @@ const Job = union(enum) {
/// After analysis, a `codegen_func` job will be queued.
/// These must be separate jobs to ensure any needed type resolution occurs *before* codegen.
analyze_func: InternPool.Index,
- /// The source file containing the Decl has been updated, and so the
- /// Decl may need its line number information updated in the debug info.
- update_line_number: void, // TODO
/// The main source file for the module needs to be analyzed.
analyze_mod: *Package.Module,
/// Fully resolve the given `struct` or `union` type.
@@ -374,6 +376,7 @@ const Job = union(enum) {
musl_crt_file: musl.CrtFile,
/// one of the mingw-w64 static objects
mingw_crt_file: mingw.CrtFile,
+
/// libunwind.a, usually needed when linking libc
libunwind: void,
libcxx: void,
@@ -1769,68 +1772,107 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
// If we need to build glibc for the target, add work items for it.
// We go through the work queue so that building can be done in parallel.
- if (comp.wantBuildGLibCFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+ // If linking against host libc installation, instead queue up jobs
+ // for loading those files in the linker.
+ if (comp.config.link_libc and is_exe_or_dyn_lib and target.ofmt != .c) {
+ if (comp.libc_installation) |lci| {
+ const basenames = LibCInstallation.CrtBasenames.get(.{
+ .target = target,
+ .link_libc = comp.config.link_libc,
+ .output_mode = comp.config.output_mode,
+ .link_mode = comp.config.link_mode,
+ .pie = comp.config.pie,
+ });
+ const paths = try lci.resolveCrtPaths(arena, basenames, target);
+
+ const fields = @typeInfo(@TypeOf(paths)).@"struct".fields;
+ try comp.link_task_queue.shared.ensureUnusedCapacity(gpa, fields.len);
+ inline for (fields) |field| {
+ if (@field(paths, field.name)) |path| {
+ comp.link_task_queue.shared.appendAssumeCapacity(.{ .load_object = path });
+ }
+ }
+
+ const flags = target_util.libcFullLinkFlags(target);
+ try comp.link_task_queue.shared.ensureUnusedCapacity(gpa, flags.len);
+ for (flags) |flag| {
+ assert(mem.startsWith(u8, flag, "-l"));
+ const lib_name = flag["-l".len..];
+ const suffix = switch (comp.config.link_mode) {
+ .static => target.staticLibSuffix(),
+ .dynamic => target.dynamicLibSuffix(),
+ };
+ const sep = std.fs.path.sep_str;
+ const lib_path = try std.fmt.allocPrint(arena, "{s}" ++ sep ++ "lib{s}{s}", .{
+ lci.crt_dir.?, lib_name, suffix,
+ });
+ const resolved_path = Path.initCwd(lib_path);
+ comp.link_task_queue.shared.appendAssumeCapacity(switch (comp.config.link_mode) {
+ .static => .{ .load_archive = resolved_path },
+ .dynamic => .{ .load_dso = resolved_path },
+ });
+ }
+ } else if (target.isMusl() and !target.isWasm()) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
- if (glibc.needsCrtiCrtn(target)) {
+ if (musl.needsCrtiCrtn(target)) {
+ try comp.queueJobs(&[_]Job{
+ .{ .musl_crt_file = .crti_o },
+ .{ .musl_crt_file = .crtn_o },
+ });
+ }
try comp.queueJobs(&[_]Job{
- .{ .glibc_crt_file = .crti_o },
- .{ .glibc_crt_file = .crtn_o },
+ .{ .musl_crt_file = .crt1_o },
+ .{ .musl_crt_file = .scrt1_o },
+ .{ .musl_crt_file = .rcrt1_o },
+ switch (comp.config.link_mode) {
+ .static => .{ .musl_crt_file = .libc_a },
+ .dynamic => .{ .musl_crt_file = .libc_so },
+ },
});
- }
- try comp.queueJobs(&[_]Job{
- .{ .glibc_crt_file = .scrt1_o },
- .{ .glibc_crt_file = .libc_nonshared_a },
- .{ .glibc_shared_objects = {} },
- });
- }
- if (comp.wantBuildMuslFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+ } else if (target.isGnuLibC()) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
- if (musl.needsCrtiCrtn(target)) {
+ if (glibc.needsCrtiCrtn(target)) {
+ try comp.queueJobs(&[_]Job{
+ .{ .glibc_crt_file = .crti_o },
+ .{ .glibc_crt_file = .crtn_o },
+ });
+ }
try comp.queueJobs(&[_]Job{
- .{ .musl_crt_file = .crti_o },
- .{ .musl_crt_file = .crtn_o },
+ .{ .glibc_crt_file = .scrt1_o },
+ .{ .glibc_crt_file = .libc_nonshared_a },
+ .{ .glibc_shared_objects = {} },
});
- }
- try comp.queueJobs(&[_]Job{
- .{ .musl_crt_file = .crt1_o },
- .{ .musl_crt_file = .scrt1_o },
- .{ .musl_crt_file = .rcrt1_o },
- switch (comp.config.link_mode) {
- .static => .{ .musl_crt_file = .libc_a },
- .dynamic => .{ .musl_crt_file = .libc_so },
- },
- });
- }
+ } else if (target.isWasm() and target.os.tag == .wasi) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
- if (comp.wantBuildWasiLibcFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
+ for (comp.wasi_emulated_libs) |crt_file| {
+ try comp.queueJob(.{
+ .wasi_libc_crt_file = crt_file,
+ });
+ }
+ try comp.queueJobs(&[_]Job{
+ .{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) },
+ .{ .wasi_libc_crt_file = .libc_a },
+ });
+ } else if (target.isMinGW()) {
+ if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
- for (comp.wasi_emulated_libs) |crt_file| {
- try comp.queueJob(.{
- .wasi_libc_crt_file = crt_file,
+ const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o };
+ try comp.queueJobs(&.{
+ .{ .mingw_crt_file = .mingw32_lib },
+ crt_job,
});
+
+ // When linking mingw-w64 there are some import libs we always need.
+ try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
+ for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(name, {});
+ } else {
+ return error.LibCUnavailable;
}
- try comp.queueJobs(&[_]Job{
- .{ .wasi_libc_crt_file = wasi_libc.execModelCrtFile(comp.config.wasi_exec_model) },
- .{ .wasi_libc_crt_file = .libc_a },
- });
}
- if (comp.wantBuildMinGWFromSource()) {
- if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
-
- const crt_job: Job = .{ .mingw_crt_file = if (is_dyn_lib) .dllcrt2_o else .crt2_o };
- try comp.queueJobs(&.{
- .{ .mingw_crt_file = .mingw32_lib },
- crt_job,
- });
-
- // When linking mingw-w64 there are some import libs we always need.
- try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
- for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(name, {});
- }
// Generate Windows import libs.
if (target.os.tag == .windows) {
const count = comp.windows_libs.count();
@@ -1885,12 +1927,16 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
{
try comp.queueJob(.{ .zig_libc = {} });
}
+
+ try comp.link_task_queue.shared.append(gpa, .load_explicitly_provided);
}
return comp;
}
pub fn destroy(comp: *Compilation) void {
+ const gpa = comp.gpa;
+
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
comp.cache_use.deinit();
@@ -1901,7 +1947,6 @@ pub fn destroy(comp: *Compilation) void {
comp.astgen_work_queue.deinit();
comp.embed_file_work_queue.deinit();
- const gpa = comp.gpa;
comp.windows_libs.deinit(gpa);
{
@@ -3446,6 +3491,9 @@ pub fn performAllTheWork(
comp: *Compilation,
main_progress_node: std.Progress.Node,
) JobError!void {
+ comp.work_queue_progress_node = main_progress_node;
+ defer comp.work_queue_progress_node = .none;
+
defer if (comp.zcu) |zcu| {
zcu.sema_prog_node.end();
zcu.sema_prog_node = std.Progress.Node.none;
@@ -3467,12 +3515,20 @@ fn performAllTheWorkInner(
// (at least for now) single-threaded main work queue. However, C object compilation
// only needs to be finished by the end of this function.
- var work_queue_wait_group: WaitGroup = .{};
+ const work_queue_wait_group = &comp.work_queue_wait_group;
+
+ work_queue_wait_group.reset();
defer work_queue_wait_group.wait();
+ if (comp.bin_file) |lf| {
+ if (try comp.link_task_queue.enqueue(comp.gpa, &.{.load_explicitly_provided})) {
+ comp.thread_pool.spawnWg(work_queue_wait_group, link.File.flushTaskQueue, .{ lf, main_progress_node });
+ }
+ }
+
if (comp.docs_emit != null) {
dev.check(.docs_emit);
- comp.thread_pool.spawnWg(&work_queue_wait_group, workerDocsCopy, .{comp});
+ comp.thread_pool.spawnWg(work_queue_wait_group, workerDocsCopy, .{comp});
work_queue_wait_group.spawnManager(workerDocsWasm, .{ comp, main_progress_node });
}
@@ -3538,21 +3594,32 @@ fn performAllTheWorkInner(
}
while (comp.c_object_work_queue.readItem()) |c_object| {
- comp.thread_pool.spawnWg(&work_queue_wait_group, workerUpdateCObject, .{
+ comp.thread_pool.spawnWg(work_queue_wait_group, workerUpdateCObject, .{
comp, c_object, main_progress_node,
});
}
while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
- comp.thread_pool.spawnWg(&work_queue_wait_group, workerUpdateWin32Resource, .{
+ comp.thread_pool.spawnWg(work_queue_wait_group, workerUpdateWin32Resource, .{
comp, win32_resource, main_progress_node,
});
}
}
- if (comp.job_queued_compiler_rt_lib) work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Lib, &comp.compiler_rt_lib, main_progress_node });
- if (comp.job_queued_compiler_rt_obj) work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
- if (comp.job_queued_fuzzer_lib) work_queue_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
+ if (comp.job_queued_compiler_rt_lib) {
+ comp.job_queued_compiler_rt_lib = false;
+ work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Lib, &comp.compiler_rt_lib, main_progress_node });
+ }
+
+ if (comp.job_queued_compiler_rt_obj) {
+ comp.job_queued_compiler_rt_obj = false;
+ work_queue_wait_group.spawnManager(buildRt, .{ comp, "compiler_rt.zig", .compiler_rt, .Obj, &comp.compiler_rt_obj, main_progress_node });
+ }
+
+ if (comp.job_queued_fuzzer_lib) {
+ comp.job_queued_fuzzer_lib = false;
+ work_queue_wait_group.spawnManager(buildRt, .{ comp, "fuzzer.zig", .libfuzzer, .Lib, &comp.fuzzer_lib, main_progress_node });
+ }
if (comp.zcu) |zcu| {
const pt: Zcu.PerThread = .{ .zcu = zcu, .tid = .main };
@@ -3570,7 +3637,7 @@ fn performAllTheWorkInner(
if (!InternPool.single_threaded) {
comp.codegen_work.done = false; // may be `true` from a prior update
- comp.thread_pool.spawnWgId(&work_queue_wait_group, codegenThread, .{comp});
+ comp.thread_pool.spawnWgId(work_queue_wait_group, codegenThread, .{comp});
}
defer if (!InternPool.single_threaded) {
{
@@ -3679,31 +3746,6 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job, prog_node: std.Progre
error.AnalysisFail => return,
};
},
- .update_line_number => |decl_index| {
- const named_frame = tracy.namedFrame("update_line_number");
- defer named_frame.end();
-
- if (true) @panic("TODO: update_line_number");
-
- const gpa = comp.gpa;
- const pt: Zcu.PerThread = .{ .zcu = comp.zcu.?, .tid = @enumFromInt(tid) };
- const decl = pt.zcu.declPtr(decl_index);
- const lf = comp.bin_file.?;
- lf.updateDeclLineNumber(pt, decl_index) catch |err| {
- try pt.zcu.failed_analysis.ensureUnusedCapacity(gpa, 1);
- pt.zcu.failed_analysis.putAssumeCapacityNoClobber(
- InternPool.AnalUnit.wrap(.{ .decl = decl_index }),
- try Zcu.ErrorMsg.create(
- gpa,
- decl.navSrcLoc(pt.zcu),
- "unable to update line number: {s}",
- .{@errorName(err)},
- ),
- );
- decl.analysis = .codegen_failure;
- try pt.zcu.retryable_failures.append(gpa, InternPool.AnalUnit.wrap(.{ .decl = decl_index }));
- };
- },
.analyze_mod => |mod| {
const named_frame = tracy.namedFrame("analyze_mod");
defer named_frame.end();
@@ -4920,7 +4962,9 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// the contents were the same, we hit the cache but the manifest is dirty and we need to update
// it to prevent doing a full file content comparison the next time around.
man.writeManifest() catch |err| {
- log.warn("failed to write cache manifest when compiling '{s}': {s}", .{ c_object.src.src_path, @errorName(err) });
+ log.warn("failed to write cache manifest when compiling '{s}': {s}", .{
+ c_object.src.src_path, @errorName(err),
+ });
};
}
@@ -4935,6 +4979,8 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
.lock = man.toOwnedLock(),
},
};
+
+ comp.enqueueLinkTasks(&.{.{ .load_object = c_object.status.success.object_path }});
}
fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32_resource_prog_node: std.Progress.Node) !void {
@@ -6058,35 +6104,6 @@ fn crtFilePath(crt_files: *std.StringHashMapUnmanaged(CrtFile), basename: []cons
return crt_file.full_object_path;
}
-fn wantBuildLibCFromSource(comp: Compilation) bool {
- const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
- .Obj => false,
- .Lib => comp.config.link_mode == .dynamic,
- .Exe => true,
- };
- const ofmt = comp.root_mod.resolved_target.result.ofmt;
- return comp.config.link_libc and is_exe_or_dyn_lib and
- comp.libc_installation == null and ofmt != .c;
-}
-
-fn wantBuildGLibCFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isGnuLibC();
-}
-
-fn wantBuildMuslFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isMusl() and
- !comp.getTarget().isWasm();
-}
-
-fn wantBuildWasiLibcFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isWasm() and
- comp.getTarget().os.tag == .wasi;
-}
-
-fn wantBuildMinGWFromSource(comp: Compilation) bool {
- return comp.wantBuildLibCFromSource() and comp.getTarget().isMinGW();
-}
-
fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
.Obj => false,
@@ -6334,9 +6351,11 @@ fn buildOutputFromZig(
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
- // Under incremental compilation, `out` may already be populated from a prior update.
- assert(out.* == null or comp.incremental);
- out.* = try sub_compilation.toCrtFile();
+ const crt_file = try sub_compilation.toCrtFile();
+ assert(out.* == null);
+ out.* = crt_file;
+
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn build_crt_file(
@@ -6443,8 +6462,39 @@ pub fn build_crt_file(
try comp.updateSubCompilation(sub_compilation, misc_task_tag, prog_node);
- try comp.crt_files.ensureUnusedCapacity(gpa, 1);
- comp.crt_files.putAssumeCapacityNoClobber(basename, try sub_compilation.toCrtFile());
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
+
+ {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try comp.crt_files.ensureUnusedCapacity(gpa, 1);
+ comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
+ }
+}
+
+pub fn enqueueLinkTaskMode(comp: *Compilation, path: Path, output_mode: std.builtin.OutputMode) void {
+ comp.enqueueLinkTasks(switch (output_mode) {
+ .Exe => unreachable,
+ .Obj => &.{.{ .load_object = path }},
+ .Lib => &.{.{ .load_archive = path }},
+ });
+}
+
+/// Only valid to call during `update`. Automatically handles queuing up a
+/// linker worker task if there is not already one.
+fn enqueueLinkTasks(comp: *Compilation, tasks: []const link.File.Task) void {
+ const use_lld = build_options.have_llvm and comp.config.use_lld;
+ if (use_lld) return;
+ const target = comp.root_mod.resolved_target.result;
+ if (target.ofmt != .elf) return;
+ if (comp.link_task_queue.enqueue(comp.gpa, tasks) catch |err| switch (err) {
+ error.OutOfMemory => return comp.setAllocFailure(),
+ }) {
+ comp.thread_pool.spawnWg(&comp.work_queue_wait_group, link.File.flushTaskQueue, .{
+ comp.bin_file.?, comp.work_queue_progress_node,
+ });
+ }
}
pub fn toCrtFile(comp: *Compilation) Allocator.Error!CrtFile {
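
Aside: the host-libc branch added to create() above turns each "-l" flag from target_util.libcFullLinkFlags into a concrete library path and queues it as a load_archive or load_dso link task. Below is a minimal standalone sketch of that mapping, not compiler code; the hard-coded flag list and the /usr/lib/x86_64-linux-gnu directory are assumptions standing in for libcFullLinkFlags(target) and LibCInstallation.crt_dir.

const std = @import("std");

// Assumed stand-in for target_util.libcFullLinkFlags(target) on a glibc Linux target.
const libc_flags = [_][]const u8{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" };

pub fn main() !void {
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    // Assumptions standing in for LibCInstallation.crt_dir and comp.config.link_mode.
    const crt_dir = "/usr/lib/x86_64-linux-gnu";
    const dynamic = true;

    for (libc_flags) |flag| {
        std.debug.assert(std.mem.startsWith(u8, flag, "-l"));
        const lib_name = flag["-l".len..];
        // The real code uses target.dynamicLibSuffix() / target.staticLibSuffix().
        const suffix: []const u8 = if (dynamic) ".so" else ".a";
        const lib_path = try std.fmt.allocPrint(arena, "{s}" ++ std.fs.path.sep_str ++ "lib{s}{s}", .{
            crt_dir, lib_name, suffix,
        });
        // The compiler appends each resolved path to link_task_queue as
        // .load_dso (dynamic) or .load_archive (static).
        std.debug.print("{s}\n", .{lib_path});
    }
}
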
src/glibc.zig
@@ -1204,14 +1204,12 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) !voi
};
assert(comp.glibc_so_files == null);
- comp.glibc_so_files = BuiltSharedObjects{
+ comp.glibc_so_files = .{
.lock = man.toOwnedLock(),
.dir_path = try comp.global_cache_directory.join(comp.gpa, &.{ "o", &digest }),
};
}
-// zig fmt: on
-
fn buildSharedLib(
comp: *Compilation,
arena: Allocator,
src/libcxx.zig
@@ -355,7 +355,9 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
};
assert(comp.libcxx_static_lib == null);
- comp.libcxx_static_lib = try sub_compilation.toCrtFile();
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.libcxx_static_lib = crt_file;
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildError!void {
@@ -584,7 +586,9 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
assert(comp.libcxxabi_static_lib == null);
- comp.libcxxabi_static_lib = try sub_compilation.toCrtFile();
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.libcxxabi_static_lib = crt_file;
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
}
pub fn hardeningModeFlag(optimize_mode: std.builtin.OptimizeMode) []const u8 {
src/libtsan.zig
@@ -342,8 +342,10 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
},
};
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
assert(comp.tsan_lib == null);
- comp.tsan_lib = try sub_compilation.toCrtFile();
+ comp.tsan_lib = crt_file;
}
const tsan_sources = [_][]const u8{
src/libunwind.zig
@@ -199,8 +199,10 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
},
};
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
assert(comp.libunwind_static_lib == null);
- comp.libunwind_static_lib = try sub_compilation.toCrtFile();
+ comp.libunwind_static_lib = crt_file;
}
const unwind_src_list = [_][]const u8{
src/link.zig
@@ -24,6 +24,7 @@ const LlvmObject = @import("codegen/llvm.zig").Object;
const lldMain = @import("main.zig").lldMain;
const Package = @import("Package.zig");
const dev = @import("dev.zig");
+const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;
pub const LdScript = @import("link/LdScript.zig");
@@ -368,6 +369,9 @@ pub const File = struct {
lock: ?Cache.Lock = null,
child_pid: ?std.process.Child.Id = null,
+ /// Ensure only 1 simultaneous call to `flushTaskQueue`.
+ task_queue_safety: std.debug.SafetyLock = .{},
+
pub const OpenOptions = struct {
symbol_count_hint: u64 = 32,
program_code_size_hint: u64 = 256 * 1024,
@@ -995,6 +999,86 @@ pub const File = struct {
}
}
+ /// Opens a path as an object file and parses it into the linker.
+ fn openLoadObject(base: *File, path: Path) anyerror!void {
+ const diags = &base.comp.link_diags;
+ const input = try openObjectInput(diags, path);
+ errdefer input.object.file.close();
+ try loadInput(base, input);
+ }
+
+ /// Opens a path as a static library and parses it into the linker.
+ fn openLoadArchive(base: *File, path: Path) anyerror!void {
+ const diags = &base.comp.link_diags;
+ const input = try openArchiveInput(diags, path, false, false);
+ errdefer input.archive.file.close();
+ try loadInput(base, input);
+ }
+
+ /// Opens a path as a shared library and parses it into the linker.
+ /// Handles GNU ld scripts.
+ fn openLoadDso(base: *File, path: Path, query: UnresolvedInput.Query) anyerror!void {
+ const dso = try openDso(path, query.needed, query.weak, query.reexport);
+ errdefer dso.file.close();
+ loadInput(base, .{ .dso = dso }) catch |err| switch (err) {
+ error.BadMagic, error.UnexpectedEndOfFile => {
+ if (base.tag != .elf) return err;
+ try loadGnuLdScript(base, path, query, dso.file);
+ dso.file.close();
+ return;
+ },
+ else => return err,
+ };
+ }
+
+ fn loadGnuLdScript(base: *File, path: Path, parent_query: UnresolvedInput.Query, file: fs.File) anyerror!void {
+ const diags = &base.comp.link_diags;
+ const gpa = base.comp.gpa;
+ const stat = try file.stat();
+ const size = std.math.cast(u32, stat.size) orelse return error.FileTooBig;
+ const buf = try gpa.alloc(u8, size);
+ defer gpa.free(buf);
+ const n = try file.preadAll(buf, 0);
+ if (buf.len != n) return error.UnexpectedEndOfFile;
+ var ld_script = try LdScript.parse(gpa, diags, path, buf);
+ defer ld_script.deinit(gpa);
+ for (ld_script.args) |arg| {
+ const query: UnresolvedInput.Query = .{
+ .needed = arg.needed or parent_query.needed,
+ .weak = parent_query.weak,
+ .reexport = parent_query.reexport,
+ .preferred_mode = parent_query.preferred_mode,
+ .search_strategy = parent_query.search_strategy,
+ .allow_so_scripts = parent_query.allow_so_scripts,
+ };
+ if (mem.startsWith(u8, arg.path, "-l")) {
+ @panic("TODO");
+ } else {
+ if (fs.path.isAbsolute(arg.path)) {
+ const new_path = Path.initCwd(try gpa.dupe(u8, arg.path));
+ switch (Compilation.classifyFileExt(arg.path)) {
+ .shared_library => try openLoadDso(base, new_path, query),
+ .object => try openLoadObject(base, new_path),
+ .static_library => try openLoadArchive(base, new_path),
+ else => diags.addParseError(path, "GNU ld script references file with unrecognized extension: {s}", .{arg.path}),
+ }
+ } else {
+ @panic("TODO");
+ }
+ }
+ }
+ }
+
+ pub fn loadInput(base: *File, input: Input) anyerror!void {
+ switch (base.tag) {
+ inline .elf => |tag| {
+ dev.check(tag.devFeature());
+ return @as(*tag.Type(), @fieldParentPtr("base", base)).loadInput(input);
+ },
+ else => {},
+ }
+ }
+
pub fn linkAsArchive(base: *File, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) FlushError!void {
dev.check(.lld_linker);
@@ -1261,6 +1345,111 @@ pub const File = struct {
pub const Wasm = @import("link/Wasm.zig");
pub const NvPtx = @import("link/NvPtx.zig");
pub const Dwarf = @import("link/Dwarf.zig");
+
+    /// Runs all the tasks in the queue. Runs on exactly one thread, separate
+    /// from the rest of compilation, so the tasks performed here are
+    /// single-threaded with respect to one another.
+ pub fn flushTaskQueue(base: *File, prog_node: std.Progress.Node) void {
+ const comp = base.comp;
+ base.task_queue_safety.lock();
+ defer base.task_queue_safety.unlock();
+ while (comp.link_task_queue.check()) |tasks| {
+ for (tasks) |task| doTask(base, prog_node, task);
+ }
+ }
+
+ pub const Task = union(enum) {
+ /// Loads the objects, shared objects, and archives that are already
+ /// known from the command line.
+ load_explicitly_provided,
+ /// Tells the linker to load an object file by path.
+ load_object: Path,
+ /// Tells the linker to load a static library by path.
+ load_archive: Path,
+ /// Tells the linker to load a shared library, possibly one that is a
+ /// GNU ld script.
+ load_dso: Path,
+ /// Tells the linker to load an input which could be an object file,
+ /// archive, or shared library.
+ load_input: Input,
+ };
+
+ fn doTask(base: *File, parent_prog_node: std.Progress.Node, task: Task) void {
+ const comp = base.comp;
+ switch (task) {
+ .load_explicitly_provided => {
+ const prog_node = parent_prog_node.start("Linker Parse Input", comp.link_inputs.len);
+ defer prog_node.end();
+
+ for (comp.link_inputs) |input| {
+ const sub_node = prog_node.start(input.taskName(), 0);
+ defer sub_node.end();
+ base.loadInput(input) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| {
+ if (input.path()) |path| {
+ comp.link_diags.addParseError(path, "failed to parse linker input: {s}", .{@errorName(e)});
+ } else {
+ comp.link_diags.addError("failed to {s}: {s}", .{ input.taskName(), @errorName(e) });
+ }
+ },
+ };
+ }
+ },
+ .load_object => |path| {
+ const prog_node = parent_prog_node.start("Linker Parse Object", 0);
+ defer prog_node.end();
+ const sub_node = prog_node.start(path.basename(), 0);
+ defer sub_node.end();
+
+ base.openLoadObject(path) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| comp.link_diags.addParseError(path, "failed to parse object: {s}", .{@errorName(e)}),
+ };
+ },
+ .load_archive => |path| {
+ const prog_node = parent_prog_node.start("Linker Parse Archive", 0);
+ defer prog_node.end();
+ const sub_node = prog_node.start(path.basename(), 0);
+ defer sub_node.end();
+
+ base.openLoadArchive(path) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| comp.link_diags.addParseError(path, "failed to parse archive: {s}", .{@errorName(e)}),
+ };
+ },
+ .load_dso => |path| {
+ const prog_node = parent_prog_node.start("Linker Parse Shared Library", 0);
+ defer prog_node.end();
+ const sub_node = prog_node.start(path.basename(), 0);
+ defer sub_node.end();
+
+ base.openLoadDso(path, .{
+ .preferred_mode = .dynamic,
+ .search_strategy = .paths_first,
+ }) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| comp.link_diags.addParseError(path, "failed to parse shared library: {s}", .{@errorName(e)}),
+ };
+ },
+ .load_input => |input| {
+ const prog_node = parent_prog_node.start("Linker Parse Input", 0);
+ defer prog_node.end();
+ const sub_node = prog_node.start(input.taskName(), 0);
+ defer sub_node.end();
+ base.loadInput(input) catch |err| switch (err) {
+ error.LinkFailure => return, // error reported via link_diags
+ else => |e| {
+ if (input.path()) |path| {
+ comp.link_diags.addParseError(path, "failed to parse linker input: {s}", .{@errorName(e)});
+ } else {
+ comp.link_diags.addError("failed to {s}: {s}", .{ input.taskName(), @errorName(e) });
+ }
+ },
+ };
+ },
+ }
+ }
};
pub fn spawnLld(
@@ -1480,6 +1669,14 @@ pub const Input = union(enum) {
.dso_exact => null,
};
}
+
+ pub fn taskName(input: Input) []const u8 {
+ return switch (input) {
+ .object, .archive => |obj| obj.path.basename(),
+ inline .res, .dso => |x| x.path.basename(),
+ .dso_exact => "dso_exact",
+ };
+ }
};
pub fn hashInputs(man: *Cache.Manifest, link_inputs: []const Input) !void {
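
Aside: a minimal, self-contained sketch of the enqueue/flushTaskQueue handshake introduced above, assuming the ThreadSafeQueue.zig added by this commit is importable next to it. The producer appends tasks and spawns exactly one drainer only when enqueue reports that the queue was idle, mirroring Compilation.enqueueLinkTasks and link.File.flushTaskQueue; MiniTask, Linker, and drain are hypothetical names used only for illustration.

const std = @import("std");
const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;

// Hypothetical stand-in for link.File.Task.
const MiniTask = union(enum) {
    load_object: []const u8,
    load_archive: []const u8,
};

const Linker = struct {
    queue: ThreadSafeQueue(MiniTask) = .empty,

    // Mirrors flushTaskQueue: drain batches until `check` sees an empty
    // queue and flips the state back to `.wait`.
    fn drain(self: *Linker) void {
        while (self.queue.check()) |tasks| {
            for (tasks) |task| {
                switch (task) {
                    .load_object => |p| std.debug.print("parse object {s}\n", .{p}),
                    .load_archive => |p| std.debug.print("parse archive {s}\n", .{p}),
                }
            }
        }
    }
};

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var linker: Linker = .{};
    defer linker.queue.deinit(gpa);

    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa });
    defer pool.deinit();

    var wg: std.Thread.WaitGroup = .{};
    defer wg.wait();

    // Producer side, mirroring enqueueLinkTasks: only spawn a drainer when
    // the queue was idle; a drainer that is already running will pick up the
    // new tasks on its next `check`.
    if (try linker.queue.enqueue(gpa, &[_]MiniTask{
        .{ .load_object = "main.o" },
        .{ .load_archive = "libcompiler_rt.a" },
    })) {
        pool.spawnWg(&wg, Linker.drain, .{&linker});
    }
}
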
src/musl.zig
@@ -19,7 +19,7 @@ pub const CrtFile = enum {
libc_so,
};
-pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progress.Node) !void {
+pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Progress.Node) !void {
if (!build_options.have_llvm) {
return error.ZigCompilerNotBuiltWithLLVMExtensions;
}
@@ -28,7 +28,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- switch (crt_file) {
+ switch (in_crt_file) {
.crti_o => {
var args = std.ArrayList([]const u8).init(arena);
try addCcArgs(comp, arena, &args, false);
@@ -195,8 +195,9 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
.libc_so => {
const optimize_mode = comp.compilerRtOptMode();
const strip = comp.compilerRtStrip();
+ const output_mode: std.builtin.OutputMode = .Lib;
const config = try Compilation.Config.resolve(.{
- .output_mode = .Lib,
+ .output_mode = output_mode,
.link_mode = .dynamic,
.resolved_target = comp.root_mod.resolved_target,
.is_test = false,
@@ -276,12 +277,17 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try comp.updateSubCompilation(sub_compilation, .@"musl libc.so", prog_node);
- try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
-
const basename = try comp.gpa.dupe(u8, "libc.so");
errdefer comp.gpa.free(basename);
- comp.crt_files.putAssumeCapacityNoClobber(basename, try sub_compilation.toCrtFile());
+ const crt_file = try sub_compilation.toCrtFile();
+ comp.enqueueLinkTaskMode(crt_file.full_object_path, output_mode);
+ {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ try comp.crt_files.ensureUnusedCapacity(comp.gpa, 1);
+ comp.crt_files.putAssumeCapacityNoClobber(basename, crt_file);
+ }
},
}
}
src/target.zig
@@ -1,4 +1,6 @@
const std = @import("std");
+const assert = std.debug.assert;
+
const Type = @import("Type.zig");
const AddressSpace = std.builtin.AddressSpace;
const Alignment = @import("InternPool.zig").Alignment;
@@ -284,40 +286,17 @@ pub fn hasRedZone(target: std.Target) bool {
pub fn libcFullLinkFlags(target: std.Target) []const []const u8 {
// The linking order of these is significant and should match the order other
// C compilers such as gcc or clang use.
- return switch (target.os.tag) {
- .netbsd, .openbsd => &[_][]const u8{
- "-lm",
- "-lpthread",
- "-lc",
- "-lutil",
- },
- .solaris, .illumos => &[_][]const u8{
- "-lm",
- "-lsocket",
- "-lnsl",
- // Solaris releases after 10 merged the threading libraries into libc.
- "-lc",
- },
- .haiku => &[_][]const u8{
- "-lm",
- "-lroot",
- "-lpthread",
- "-lc",
- "-lnetwork",
- },
- else => if (target.isAndroid() or target.abi.isOpenHarmony()) &[_][]const u8{
- "-lm",
- "-lc",
- "-ldl",
- } else &[_][]const u8{
- "-lm",
- "-lpthread",
- "-lc",
- "-ldl",
- "-lrt",
- "-lutil",
- },
+ const result: []const []const u8 = switch (target.os.tag) {
+ .netbsd, .openbsd => &.{ "-lm", "-lpthread", "-lc", "-lutil" },
+ // Solaris releases after 10 merged the threading libraries into libc.
+ .solaris, .illumos => &.{ "-lm", "-lsocket", "-lnsl", "-lc" },
+ .haiku => &.{ "-lm", "-lroot", "-lpthread", "-lc", "-lnetwork" },
+ else => if (target.isAndroid() or target.abi.isOpenHarmony())
+ &.{ "-lm", "-lc", "-ldl" }
+ else
+ &.{ "-lm", "-lpthread", "-lc", "-ldl", "-lrt", "-lutil" },
};
+ return result;
}
pub fn clangMightShellOutForAssembly(target: std.Target) bool {
src/ThreadSafeQueue.zig
@@ -0,0 +1,63 @@
+const std = @import("std");
+const assert = std.debug.assert;
+const Allocator = std.mem.Allocator;
+
+pub fn ThreadSafeQueue(comptime T: type) type {
+ return struct {
+ worker_owned: std.ArrayListUnmanaged(T),
+ /// Protected by `mutex`.
+ shared: std.ArrayListUnmanaged(T),
+ mutex: std.Thread.Mutex,
+ state: State,
+
+ const Self = @This();
+
+ pub const State = enum { wait, run };
+
+ pub const empty: Self = .{
+ .worker_owned = .empty,
+ .shared = .empty,
+ .mutex = .{},
+ .state = .wait,
+ };
+
+ pub fn deinit(self: *Self, gpa: Allocator) void {
+ self.worker_owned.deinit(gpa);
+ self.shared.deinit(gpa);
+ self.* = undefined;
+ }
+
+ /// Must be called from the worker thread.
+ pub fn check(self: *Self) ?[]T {
+ assert(self.worker_owned.items.len == 0);
+ {
+ self.mutex.lock();
+ defer self.mutex.unlock();
+ assert(self.state == .run);
+ if (self.shared.items.len == 0) {
+ self.state = .wait;
+ return null;
+ }
+ std.mem.swap(std.ArrayListUnmanaged(T), &self.worker_owned, &self.shared);
+ }
+ const result = self.worker_owned.items;
+ self.worker_owned.clearRetainingCapacity();
+ return result;
+ }
+
+ /// Adds items to the queue, returning true if and only if the worker
+ /// thread is waiting. Thread-safe.
+ /// Not safe to call from the worker thread.
+ pub fn enqueue(self: *Self, gpa: Allocator, items: []const T) error{OutOfMemory}!bool {
+ self.mutex.lock();
+ defer self.mutex.unlock();
+ try self.shared.appendSlice(gpa, items);
+ const was_waiting = switch (self.state) {
+ .run => false,
+ .wait => true,
+ };
+ self.state = .run;
+ return was_waiting;
+ }
+ };
+}
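
Aside: a single-threaded walkthrough of the wait/run state machine above, assuming this file is importable as ThreadSafeQueue.zig. enqueue returns true only when the queue was idle, which is the producer's cue to spawn the one worker, and check hands out batches until the queue drains and flips back to .wait.

const std = @import("std");
const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var queue: ThreadSafeQueue(u32) = .empty;
    defer queue.deinit(gpa);

    // Producer: the first enqueue finds the queue idle -> caller must spawn the worker.
    std.debug.assert(try queue.enqueue(gpa, &.{ 1, 2 }));
    // A second enqueue while the worker is (conceptually) running -> no new spawn.
    std.debug.assert((try queue.enqueue(gpa, &.{3})) == false);

    // Worker: one batch containing everything enqueued so far...
    const batch = queue.check().?;
    std.debug.assert(batch.len == 3);
    // ...then an empty queue, which flips the state back to `.wait`.
    std.debug.assert(queue.check() == null);

    // The next enqueue reports idle again, so a fresh worker would be spawned.
    std.debug.assert(try queue.enqueue(gpa, &.{4}));
}
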
CMakeLists.txt
@@ -522,6 +522,7 @@ set(ZIG_STAGE2_SOURCES
src/Sema.zig
src/Sema/bitcast.zig
src/Sema/comptime_ptr_access.zig
+ src/ThreadSafeQueue.zig
src/Type.zig
src/Value.zig
src/Zcu.zig