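//! Support for linking against NetBSD's libc: builds the `Scrt0.o` startup object from the
//! bundled CSU sources, and generates stub shared objects (libc, libm, libpthread, ...)
//! from the `abilists` metadata for the linker to resolve against.
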
const std = @import("std");
const Allocator = std.mem.Allocator;
const mem = std.mem;
const log = std.log;
const fs = std.fs;
const path = fs.path;
const assert = std.debug.assert;
const Version = std.SemanticVersion;
const Path = std.Build.Cache.Path;

const Compilation = @import("../Compilation.zig");
const build_options = @import("build_options");
const trace = @import("../tracy.zig").trace;
const Cache = std.Build.Cache;
const Module = @import("../Package/Module.zig");
const link = @import("../link.zig");

pub const CrtFile = enum {
    scrt0_o,
};

pub fn needsCrt0(output_mode: std.builtin.OutputMode) ?CrtFile {
    // For shared libraries and PIC executables, we should actually link in a variant of crt1 that
    // is built with `-DSHARED` so that it calls `__cxa_finalize` in an ELF destructor. However, we
    // currently make no effort to respect `__cxa_finalize` on any other targets, so for now, we're
    // not doing it here either.
    //
    // See: https://github.com/ziglang/zig/issues/23574#issuecomment-2869089897
    return switch (output_mode) {
        .Obj, .Lib => null,
        .Exe => .scrt0_o,
    };
}

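/// Joins `sub_path` onto the bundled libc `include` directory inside the Zig lib directory.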
fn includePath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
    return path.join(arena, &.{
        comp.dirs.zig_lib.path.?,
        "libc" ++ path.sep_str ++ "include",
        sub_path,
    });
}

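/// Joins `sub_path` onto the bundled NetBSD CSU (C startup) sources in the Zig lib directory.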
fn csuPath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
    return path.join(arena, &.{
        comp.dirs.zig_lib.path.?,
        "libc" ++ path.sep_str ++ "netbsd" ++ path.sep_str ++ "lib" ++ path.sep_str ++ "csu",
        sub_path,
    });
}

/// TODO replace anyerror with explicit error set, recording user-friendly errors with
/// lockAndSetMiscFailure and returning error.AlreadyReported. see libcxx.zig for example.
pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progress.Node) anyerror!void {
    if (!build_options.have_llvm) return error.ZigCompilerNotBuiltWithLLVMExtensions;

    const gpa = comp.gpa;
    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

    const target = &comp.root_mod.resolved_target.result;
    const target_version = target.os.version_range.semver.min;

    // In all cases in this function, we add the C compiler flags to
    // cache_exempt_flags rather than extra_flags, because these arguments
    // depend on only properties that are already covered by the cache
    // manifest. Including these arguments in the cache could only possibly
    // waste computation and create false negatives.

    switch (crt_file) {
        .scrt0_o => {
            var cflags = std.array_list.Managed([]const u8).init(arena);
            try cflags.appendSlice(&.{
                "-DHAVE_INITFINI_ARRAY",
                "-w", // Disable all warnings.
            });

            var acflags = std.array_list.Managed([]const u8).init(arena);
            try acflags.appendSlice(&.{
                // See `Compilation.addCCArgs`.
                try std.fmt.allocPrint(arena, "-D__NetBSD_Version__={d}", .{(target_version.major * 100_000_000) + (target_version.minor * 1_000_000)}),
            });

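            // The bundled CSU sources reference ELF_NOTE_MARCH_DESC/DESCSZ when emitting the
            // machine-architecture ELF note on ARM; these defines supply the NetBSD MACHINE_ARCH
            // string and its length including the terminating NUL (hence `march.len + 1`).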
            const csu_march: ?[]const u8 = switch (target.cpu.arch) {
                .arm => if (target.abi.float() == .hard) "earmhf" else "earm",
                .armeb => if (target.abi.float() == .hard) "earmhfeb" else "earmeb",
                else => null,
            };

            inline for (.{ &cflags, &acflags }) |flags| {
                if (csu_march) |march| {
                    try flags.appendSlice(&.{
                        try std.fmt.allocPrint(arena, "-DELF_NOTE_MARCH_DESC=\"{s}\"", .{march}),
                        try std.fmt.allocPrint(arena, "-DELF_NOTE_MARCH_DESCSZ={d}", .{march.len + 1}),
                    });
                }

                try flags.appendSlice(&.{
                    "-I",
                    try includePath(comp, arena, try std.fmt.allocPrint(arena, "{s}-{s}-{s}", .{
                        std.zig.target.netbsdArchNameHeaders(target.cpu.arch),
                        @tagName(target.os.tag),
                        std.zig.target.netbsdAbiNameHeaders(target.abi),
                    })),
                    "-I",
                    try includePath(comp, arena, "generic-netbsd"),
                    "-I",
                    try csuPath(comp, arena, "common"),
                    "-Qunused-arguments",
                });
            }

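            // The common C/assembly sources are built for every target; at most one of the
            // arch-specific crt0.S entries below is enabled via its `condition` field.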
            const sources = [_]struct {
                path: []const u8,
                flags: []const []const u8,
                condition: bool = true,
            }{
                .{
                    .path = "common" ++ path.sep_str ++ "crt0-common.c",
                    .flags = cflags.items,
                },
                .{
                    .path = "common" ++ path.sep_str ++ "crtbegin.c",
                    .flags = cflags.items,
                },
                .{
                    .path = "common" ++ path.sep_str ++ "sysident.S",
                    .flags = acflags.items,
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "earm" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch.isArm(),
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "aarch64" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch.isAARCH64(),
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "m68k" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch == .m68k,
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "mips" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch.isMIPS(),
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "powerpc" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch == .powerpc,
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "sparc" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch == .sparc,
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "sparc64" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch == .sparc64,
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "i386" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch == .x86,
                },

                .{
                    .path = "arch" ++ path.sep_str ++ "x86_64" ++ path.sep_str ++ "crt0.S",
                    .flags = acflags.items,
                    .condition = target.cpu.arch == .x86_64,
                },
            };

            var files_buf: [sources.len]Compilation.CSourceFile = undefined;
            var files_index: usize = 0;
            for (sources) |file| {
                if (!file.condition) continue;

                files_buf[files_index] = .{
                    .src_path = try csuPath(comp, arena, file.path),
                    .cache_exempt_flags = file.flags,
                    .owner = undefined,
                };
                files_index += 1;
            }
            const files = files_buf[0..files_index];

            return comp.build_crt_file("crt0", .Obj, .@"netbsd libc Scrt0.o", prog_node, files, .{
                .pic = true,
            });
        },
    }
}

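/// A NetBSD system library for which a stub shared object is generated: `name` is the `foo`
/// in `libfoo.so`, and `sover` is the major shared-object version appended to the file name.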
pub const Lib = struct {
    name: []const u8,
    sover: u8,
};

pub const libs = [_]Lib{
    .{ .name = "m", .sover = 0 },
    .{ .name = "pthread", .sover = 1 },
    .{ .name = "c", .sover = 12 },
    .{ .name = "rt", .sover = 1 },
    .{ .name = "ld", .sover = 0 },
    .{ .name = "util", .sover = 7 },
    .{ .name = "execinfo", .sover = 0 },
};

pub const ABI = struct {
    all_versions: []const Version, // all defined versions (one abilist from v2.0.0 up to current)
    all_targets: []const std.zig.target.ArchOsAbi,
    /// The bytes from the file verbatim, starting from the u16 number
    /// of function inclusions.
    inclusions: []const u8,
    arena_state: std.heap.ArenaAllocator.State,

    pub fn destroy(abi: *ABI, gpa: Allocator) void {
        abi.arena_state.promote(gpa).deinit();
    }
};

pub const LoadMetaDataError = error{
    /// The files that ship with the Zig compiler were unable to be read, or otherwise had malformed data.
    ZigInstallationCorrupt,
    OutOfMemory,
};

pub const abilists_path = "libc" ++ path.sep_str ++ "netbsd" ++ path.sep_str ++ "abilists";
pub const abilists_max_size = 300 * 1024; // Bigger than this and something is definitely borked.

/// This function will emit a log error when there is a problem with the zig
/// installation and then return `error.ZigInstallationCorrupt`.
pub fn loadMetaData(gpa: Allocator, contents: []const u8) LoadMetaDataError!*ABI {
    const tracy = trace(@src());
    defer tracy.end();

    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
    errdefer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

    var index: usize = 0;

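    // The abilists blob is parsed sequentially: a library-name table (which must match `libs`
    // above, in order), a table of 3-byte versions (major, minor, patch), a table of
    // NUL-terminated arch-os-abi triples, and finally the raw inclusion records, which are
    // kept verbatim in `ABI.inclusions`.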
    {
        const libs_len = contents[index];
        index += 1;

        var i: u8 = 0;
        while (i < libs_len) : (i += 1) {
            const lib_name = mem.sliceTo(contents[index..], 0);
            index += lib_name.len + 1;

            if (i >= libs.len or !mem.eql(u8, libs[i].name, lib_name)) {
                log.err("libc" ++ path.sep_str ++ "netbsd" ++ path.sep_str ++
                    "abilists: invalid library name or index ({d}): '{s}'", .{ i, lib_name });
                return error.ZigInstallationCorrupt;
            }
        }
    }

    const versions = b: {
        const versions_len = contents[index];
        index += 1;

        const versions = try arena.alloc(Version, versions_len);
        var i: u8 = 0;
        while (i < versions.len) : (i += 1) {
            versions[i] = .{
                .major = contents[index + 0],
                .minor = contents[index + 1],
                .patch = contents[index + 2],
            };
            index += 3;
        }
        break :b versions;
    };

    const targets = b: {
        const targets_len = contents[index];
        index += 1;

        const targets = try arena.alloc(std.zig.target.ArchOsAbi, targets_len);
        var i: u8 = 0;
        while (i < targets.len) : (i += 1) {
            const target_name = mem.sliceTo(contents[index..], 0);
            index += target_name.len + 1;

            var component_it = mem.tokenizeScalar(u8, target_name, '-');
            const arch_name = component_it.next() orelse {
                log.err("abilists: expected arch name", .{});
                return error.ZigInstallationCorrupt;
            };
            const os_name = component_it.next() orelse {
                log.err("abilists: expected OS name", .{});
                return error.ZigInstallationCorrupt;
            };
            const abi_name = component_it.next() orelse {
                log.err("abilists: expected ABI name", .{});
                return error.ZigInstallationCorrupt;
            };
            const arch_tag = std.meta.stringToEnum(std.Target.Cpu.Arch, arch_name) orelse {
                log.err("abilists: unrecognized arch: '{s}'", .{arch_name});
                return error.ZigInstallationCorrupt;
            };
            if (!mem.eql(u8, os_name, "netbsd")) {
                log.err("abilists: expected OS 'netbsd', found '{s}'", .{os_name});
                return error.ZigInstallationCorrupt;
            }
            const abi_tag = std.meta.stringToEnum(std.Target.Abi, abi_name) orelse {
                log.err("abilists: unrecognized ABI: '{s}'", .{abi_name});
                return error.ZigInstallationCorrupt;
            };

            targets[i] = .{
                .arch = arch_tag,
                .os = .netbsd,
                .abi = abi_tag,
            };
        }
        break :b targets;
    };

    const abi = try arena.create(ABI);
    abi.* = .{
        .all_versions = versions,
        .all_targets = targets,
        .inclusions = contents[index..],
        .arena_state = arena_allocator.state,
    };
    return abi;
}

pub const BuiltSharedObjects = struct {
    lock: Cache.Lock,
    dir_path: Path,

    pub fn deinit(self: *BuiltSharedObjects, gpa: Allocator) void {
        self.lock.release();
        gpa.free(self.dir_path.sub_path);
        self.* = undefined;
    }
};

fn wordDirective(target: *const std.Target) []const u8 {
    // Based on its description in the GNU `as` manual, you might assume that `.word` is sized
    // according to the target word size. But no; that would just make too much sense.
    return if (target.ptrBitWidth() == 64) ".quad" else ".long";
}

/// TODO replace anyerror with explicit error set, recording user-friendly errors with
/// lockAndSetMiscFailure and returning error.AlreadyReported. see libcxx.zig for example.
pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anyerror!void {
    // See also glibc.zig which this code is based on.

    const tracy = trace(@src());
    defer tracy.end();

    if (!build_options.have_llvm) {
        return error.ZigCompilerNotBuiltWithLLVMExtensions;
    }

    const gpa = comp.gpa;
    const io = comp.io;

    var arena_allocator = std.heap.ArenaAllocator.init(gpa);
    defer arena_allocator.deinit();
    const arena = arena_allocator.allocator();

    const target = comp.getTarget();
    const target_version = target.os.version_range.semver.min;

    // Use the global cache directory.
    var cache: Cache = .{
        .gpa = gpa,
        .io = io,
        .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
    };
    cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
    cache.addPrefix(comp.dirs.zig_lib);
    cache.addPrefix(comp.dirs.global_cache);
    defer cache.manifest_dir.close();

    var man = cache.obtain();
    defer man.deinit();
    man.hash.addBytes(build_options.version);
    man.hash.add(target.cpu.arch);
    man.hash.add(target.abi);
    man.hash.add(target_version);

    const full_abilists_path = try comp.dirs.zig_lib.join(arena, &.{abilists_path});
    const abilists_index = try man.addFile(full_abilists_path, abilists_max_size);

    if (try man.hit()) {
        const digest = man.final();

        return queueSharedObjects(comp, .{
            .lock = man.toOwnedLock(),
            .dir_path = .{
                .root_dir = comp.dirs.global_cache,
                .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
            },
        });
    }

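    // Cache miss: write one stub assembly file per library into a fresh "o/<digest>" directory
    // and compile each into its shared object.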
    const digest = man.final();
    const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });

    var o_directory: Cache.Directory = .{
        .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
        .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
    };
    defer o_directory.handle.close();

    const abilists_contents = man.files.keys()[abilists_index].contents.?;
    const metadata = try loadMetaData(gpa, abilists_contents);
    defer metadata.destroy(gpa);

    const target_targ_index = for (metadata.all_targets, 0..) |targ, i| {
        if (targ.arch == target.cpu.arch and
            targ.os == target.os.tag and
            targ.abi == target.abi)
        {
            break i;
        }
    } else {
        unreachable; // std.zig.target.available_libcs prevents us from getting here
    };

    const target_ver_index = for (metadata.all_versions, 0..) |ver, i| {
        switch (ver.order(target_version)) {
            .eq => break i,
            .lt => continue,
            .gt => {
                // TODO Expose via compile error mechanism instead of log.
                log.warn("invalid target NetBSD libc version: {f}", .{target_version});
                return error.InvalidTargetLibCVersion;
            },
        }
    } else blk: {
        const latest_index = metadata.all_versions.len - 1;
        log.warn("zig cannot build new NetBSD libc version {f}; providing instead {f}", .{
            target_version, metadata.all_versions[latest_index],
        });
        break :blk latest_index;
    };

    var stubs_asm = std.array_list.Managed(u8).init(gpa);
    defer stubs_asm.deinit();

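    // For each library, walk the inclusion records and emit one dummy definition per symbol
    // that applies to this library, target, and libc version. The resulting assembly is then
    // compiled into a stub lib<name>.so.<sover> that exists only for the linker to resolve
    // symbols against.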
    for (libs, 0..) |lib, lib_i| {
        stubs_asm.shrinkRetainingCapacity(0);

        try stubs_asm.appendSlice(".text\n");

        var sym_i: usize = 0;
        var sym_name_buf: std.Io.Writer.Allocating = .init(arena);
        var opt_symbol_name: ?[]const u8 = null;

        var inc_reader: std.Io.Reader = .fixed(metadata.inclusions);

        const fn_inclusions_len = try inc_reader.takeInt(u16, .little);

        var chosen_ver_index: usize = 255;
        var chosen_is_weak: bool = undefined;

        while (sym_i < fn_inclusions_len) : (sym_i += 1) {
            const sym_name = opt_symbol_name orelse n: {
                sym_name_buf.clearRetainingCapacity();
                _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
                assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
                inc_reader.toss(1);

                opt_symbol_name = sym_name_buf.written();
                chosen_ver_index = 255;

                break :n sym_name_buf.written();
            };

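            // Each inclusion record: a LEB128 bitset of applicable targets, a library byte
            // (bit 7 = last record for this symbol, bit 6 = weak, low bits = library index),
            // then a run of version indices whose final byte has bit 7 set.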
            {
                const targets = try inc_reader.takeLeb128(u64);
                var lib_index = try inc_reader.takeByte();

                const is_weak = (lib_index & (1 << 6)) != 0;
                const is_terminal = (lib_index & (1 << 7)) != 0;

                lib_index = @as(u5, @truncate(lib_index));

                // Test whether the inclusion applies to our current library and target.
                const ok_lib_and_target =
                    (lib_index == lib_i) and
                    ((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);

                while (true) {
                    const byte = try inc_reader.takeByte();
                    const last = (byte & 0b1000_0000) != 0;
                    const ver_i = @as(u7, @truncate(byte));
                    if (ok_lib_and_target and ver_i <= target_ver_index and
                        (chosen_ver_index == 255 or ver_i > chosen_ver_index))
                    {
                        chosen_ver_index = ver_i;
                        chosen_is_weak = is_weak;
                    }
                    if (last) break;
                }

                if (is_terminal) {
                    opt_symbol_name = null;
                } else continue;
            }

            if (chosen_ver_index != 255) {
                // Example:
                // .balign 4
                // .globl _Exit
                // .type _Exit, %function
                // _Exit: .long 0
                try stubs_asm.print(
                    \\.balign {d}
                    \\.{s} {s}
                    \\.type {s}, %function
                    \\{s}: {s} 0
                    \\
                , .{
                    target.ptrBitWidth() / 8,
                    if (chosen_is_weak) "weak" else "globl",
                    sym_name,
                    sym_name,
                    sym_name,
                    wordDirective(target),
                });
            }
        }

        try stubs_asm.appendSlice(".data\n");

        const obj_inclusions_len = try inc_reader.takeInt(u16, .little);

        sym_i = 0;
        opt_symbol_name = null;

        var chosen_size: u16 = undefined;

        while (sym_i < obj_inclusions_len) : (sym_i += 1) {
            const sym_name = opt_symbol_name orelse n: {
                sym_name_buf.clearRetainingCapacity();
                _ = try inc_reader.streamDelimiter(&sym_name_buf.writer, 0);
                assert(inc_reader.buffered()[0] == 0); // TODO change streamDelimiter API
                inc_reader.toss(1);

                opt_symbol_name = sym_name_buf.written();
                chosen_ver_index = 255;

                break :n sym_name_buf.written();
            };

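            // Object records are encoded like the function records above, with an additional
            // LEB128 size (the object's size in bytes) following the target bitset.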
            {
                const targets = try inc_reader.takeLeb128(u64);
                const size = try inc_reader.takeLeb128(u16);
                var lib_index = try inc_reader.takeByte();

                const is_weak = (lib_index & (1 << 6)) != 0;
                const is_terminal = (lib_index & (1 << 7)) != 0;

                lib_index = @as(u5, @truncate(lib_index));

                // Test whether the inclusion applies to our current library and target.
                const ok_lib_and_target =
                    (lib_index == lib_i) and
                    ((targets & (@as(u64, 1) << @as(u6, @intCast(target_targ_index)))) != 0);

                while (true) {
                    const byte = try inc_reader.takeByte();
                    const last = (byte & 0b1000_0000) != 0;
                    const ver_i = @as(u7, @truncate(byte));
                    if (ok_lib_and_target and ver_i <= target_ver_index and
                        (chosen_ver_index == 255 or ver_i > chosen_ver_index))
                    {
                        chosen_ver_index = ver_i;
                        chosen_size = size;
                        chosen_is_weak = is_weak;
                    }
                    if (last) break;
                }

                if (is_terminal) {
                    opt_symbol_name = null;
                } else continue;
            }

            if (chosen_ver_index != 255) {
                // Example:
                // .balign 4
                // .globl malloc_conf
                // .type malloc_conf, %object
                // .size malloc_conf, 4
                // malloc_conf: .fill 4, 1, 0
                try stubs_asm.print(
                    \\.balign {d}
                    \\.{s} {s}
                    \\.type {s}, %object
                    \\.size {s}, {d}
                    \\{s}: {s} 0
                    \\
                , .{
                    target.ptrBitWidth() / 8,
                    if (chosen_is_weak) "weak" else "globl",
                    sym_name,
                    sym_name,
                    sym_name,
                    chosen_size,
                    sym_name,
                    wordDirective(target),
                });
            }
        }

        var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
        const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
        try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
        try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
    }

    man.writeManifest() catch |err| {
        log.warn("failed to write cache manifest for NetBSD libc stubs: {s}", .{@errorName(err)});
    };

    return queueSharedObjects(comp, .{
        .lock = man.toOwnedLock(),
        .dir_path = .{
            .root_dir = comp.dirs.global_cache,
            .sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
        },
    });
}

fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
    assert(comp.netbsd_so_files == null);
    comp.netbsd_so_files = so_files;

    var task_buffer: [libs.len]link.PrelinkTask = undefined;
    var task_buffer_i: usize = 0;

    {
        comp.mutex.lock(); // protect comp.arena
        defer comp.mutex.unlock();

        for (libs) |lib| {
            const so_path: Path = .{
                .root_dir = so_files.dir_path.root_dir,
                .sub_path = std.fmt.allocPrint(comp.arena, "{s}{c}lib{s}.so.{d}", .{
                    so_files.dir_path.sub_path, fs.path.sep, lib.name, lib.sover,
                }) catch return comp.setAllocFailure(),
            };
            task_buffer[task_buffer_i] = .{ .load_dso = so_path };
            task_buffer_i += 1;
        }
    }

    comp.queuePrelinkTasks(task_buffer[0..task_buffer_i]);
}

fn buildSharedLib(
    comp: *Compilation,
    arena: Allocator,
    bin_directory: Cache.Directory,
    asm_file_basename: []const u8,
    lib: Lib,
    prog_node: std.Progress.Node,
) !void {
    const tracy = trace(@src());
    defer tracy.end();

    const io = comp.io;
    const basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover });
    const version: Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
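    // The "ld" entry represents the dynamic linker itself, so its stub advertises the dynamic
    // linker's basename (ld.elf_so on NetBSD) as its soname rather than "libld.so.0".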
    const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
    const soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else basename;

    const optimize_mode = comp.compilerRtOptMode();
    const strip = comp.compilerRtStrip();
    const config = try Compilation.Config.resolve(.{
        .output_mode = .Lib,
        .link_mode = .dynamic,
        .resolved_target = comp.root_mod.resolved_target,
        .is_test = false,
        .have_zcu = false,
        .emit_bin = true,
        .root_optimize_mode = optimize_mode,
        .root_strip = strip,
        .link_libc = false,
    });

    const root_mod = try Module.create(arena, .{
        .paths = .{
            .root = .zig_lib_root,
            .root_src_path = "",
        },
        .fully_qualified_name = "root",
        .inherited = .{
            .resolved_target = comp.root_mod.resolved_target,
            .strip = strip,
            .stack_check = false,
            .stack_protector = 0,
            .sanitize_c = .off,
            .sanitize_thread = false,
            .red_zone = comp.root_mod.red_zone,
            .omit_frame_pointer = comp.root_mod.omit_frame_pointer,
            .valgrind = false,
            .optimize_mode = optimize_mode,
            .structured_cfg = comp.root_mod.structured_cfg,
        },
        .global = config,
        .cc_argv = &.{},
        .parent = null,
    });

    const c_source_files = [1]Compilation.CSourceFile{
        .{
            .src_path = try path.join(arena, &.{ bin_directory.path.?, asm_file_basename }),
            .owner = root_mod,
        },
    };

    const misc_task: Compilation.MiscTask = .@"netbsd libc shared object";

    var sub_create_diag: Compilation.CreateDiagnostic = undefined;
    const sub_compilation = Compilation.create(comp.gpa, arena, io, &sub_create_diag, .{
        .dirs = comp.dirs.withoutLocalCache(),
        .thread_pool = comp.thread_pool,
        .self_exe_path = comp.self_exe_path,
        // Because we manually cache the whole set of objects, we don't cache the individual objects
        // within it. In fact, we *can't* do that, because we need `emit_bin` to specify the path.
        .cache_mode = .none,
        .config = config,
        .root_mod = root_mod,
        .root_name = lib.name,
        .libc_installation = comp.libc_installation,
        .emit_bin = .{ .yes_path = try bin_directory.join(arena, &.{basename}) },
        .verbose_cc = comp.verbose_cc,
        .verbose_link = comp.verbose_link,
        .verbose_air = comp.verbose_air,
        .verbose_llvm_ir = comp.verbose_llvm_ir,
        .verbose_llvm_bc = comp.verbose_llvm_bc,
        .verbose_cimport = comp.verbose_cimport,
        .verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
        .clang_passthrough_mode = comp.clang_passthrough_mode,
        .version = version,
        .soname = soname,
        .c_source_files = &c_source_files,
        .skip_linker_dependencies = true,
    }) catch |err| switch (err) {
        error.CreateFail => {
            comp.lockAndSetMiscFailure(misc_task, "sub-compilation of {t} failed: {f}", .{ misc_task, sub_create_diag });
            return error.AlreadyReported;
        },
        else => |e| return e,
    };
    defer sub_compilation.destroy();

    try comp.updateSubCompilation(sub_compilation, misc_task, prog_node);
}