Commit 95941c4e70
Changed files (5)
ci/azure/windows_mingw_script
@@ -18,4 +18,8 @@ cmake .. -G 'MSYS Makefiles' -DCMAKE_BUILD_TYPE=RelWithDebInfo $CMAKEFLAGS -DCMA
make -j$(nproc) install
+# I saw a failure due to `git diff` being > 400 KB instead of empty as expected, so this is to debug it.
+git status
+git diff | head -n100
+
./zig build test-behavior -Dskip-non-native -Dskip-release
src-self-hosted/Cache.zig
@@ -26,9 +26,9 @@ pub fn obtain(cache: *const Cache) CacheHash {
pub const base64_encoder = fs.base64_encoder;
pub const base64_decoder = fs.base64_decoder;
-/// 16 would be 128 bits - Even with 2^54 cache entries, the probably of a collision would be under 10^-6
-/// We round up to 18 to avoid the `==` padding after base64 encoding.
-pub const BIN_DIGEST_LEN = 18;
+/// This is 128 bits - Even with 2^54 cache entries, the probability of a collision would be under 10^-6
+/// Currently we use SipHash, so this value must be 16, not any higher.
+pub const BIN_DIGEST_LEN = 16;
pub const BASE64_DIGEST_LEN = base64.Base64Encoder.calcSize(BIN_DIGEST_LEN);
const MANIFEST_FILE_SIZE_MAX = 50 * 1024 * 1024;
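For context, the quoted 10^-6 bound is consistent with the birthday approximation for n = 2^54 entries and a k = 128-bit digest (a rough check, not taken from the commit):

    p ≈ n^2 / 2^(k+1) = 2^(2*54) / 2^129 = 2^-21 ≈ 4.8e-7, comfortably below 10^-6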
@@ -87,14 +87,29 @@ pub const HashHelper = struct {
hh.add(x.major);
hh.add(x.minor);
hh.add(x.patch);
- return;
},
- else => {},
- }
-
- switch (@typeInfo(@TypeOf(x))) {
- .Bool, .Int, .Enum, .Array => hh.addBytes(mem.asBytes(&x)),
- else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
+ std.Target.Os.TaggedVersionRange => {
+ switch (x) {
+ .linux => |linux| {
+ hh.add(linux.range.min);
+ hh.add(linux.range.max);
+ hh.add(linux.glibc);
+ },
+ .windows => |windows| {
+ hh.add(windows.min);
+ hh.add(windows.max);
+ },
+ .semver => |semver| {
+ hh.add(semver.min);
+ hh.add(semver.max);
+ },
+ .none => {},
+ }
+ },
+ else => switch (@typeInfo(@TypeOf(x))) {
+ .Bool, .Int, .Enum, .Array => hh.addBytes(mem.asBytes(&x)),
+ else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
+ },
}
}
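A minimal usage sketch of the new branch (illustrative only; `ch` is assumed to be a CacheHash obtained from `cache.obtain()` and `target` a resolved std.Target):

    // Hashing a whole version range now goes through the new TaggedVersionRange case.
    ch.hash.add(target.os.getVersionRange());
    // Enums and integers still fall through to the `else` addBytes path.
    ch.hash.add(target.cpu.arch);
    ch.hash.add(@as(u16, 1234));

This is what lets Compilation.zig replace its hand-written switch with a single `hash.add(options.target.os.getVersionRange());` call further down in this commit.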
@@ -613,44 +628,46 @@ test "cache file and then recall it" {
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- defer cache.manifest_dir.close();
-
{
- var ch = cache.obtain();
- defer ch.deinit();
+ var cache = Cache{
+ .gpa = testing.allocator,
+ .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ };
+ defer cache.manifest_dir.close();
- ch.hash.add(true);
- ch.hash.add(@as(u16, 1234));
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file, null);
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- // There should be nothing in the cache
- testing.expectEqual(false, try ch.hit());
+ ch.hash.add(true);
+ ch.hash.add(@as(u16, 1234));
+ ch.hash.addBytes("1234");
+ _ = try ch.addFile(temp_file, null);
- digest1 = ch.final();
- try ch.writeManifest();
- }
- {
- var ch = cache.obtain();
- defer ch.deinit();
+ // There should be nothing in the cache
+ testing.expectEqual(false, try ch.hit());
- ch.hash.add(true);
- ch.hash.add(@as(u16, 1234));
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file, null);
+ digest1 = ch.final();
+ try ch.writeManifest();
+ }
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- // Cache hit! We just "built" the same file
- testing.expect(try ch.hit());
- digest2 = ch.final();
+ ch.hash.add(true);
+ ch.hash.add(@as(u16, 1234));
+ ch.hash.addBytes("1234");
+ _ = try ch.addFile(temp_file, null);
- try ch.writeManifest();
- }
+ // Cache hit! We just "built" the same file
+ testing.expect(try ch.hit());
+ digest2 = ch.final();
- testing.expectEqual(digest1, digest2);
+ try ch.writeManifest();
+ }
+
+ testing.expectEqual(digest1, digest2);
+ }
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteFile(temp_file);
@@ -693,50 +710,52 @@ test "check that changing a file makes cache fail" {
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- defer cache.manifest_dir.close();
-
{
- var ch = cache.obtain();
- defer ch.deinit();
+ var cache = Cache{
+ .gpa = testing.allocator,
+ .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ };
+ defer cache.manifest_dir.close();
- ch.hash.addBytes("1234");
- const temp_file_idx = try ch.addFile(temp_file, 100);
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- // There should be nothing in the cache
- testing.expectEqual(false, try ch.hit());
+ ch.hash.addBytes("1234");
+ const temp_file_idx = try ch.addFile(temp_file, 100);
- testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
+ // There should be nothing in the cache
+ testing.expectEqual(false, try ch.hit());
- digest1 = ch.final();
+ testing.expect(mem.eql(u8, original_temp_file_contents, ch.files.items[temp_file_idx].contents.?));
- try ch.writeManifest();
- }
+ digest1 = ch.final();
- try cwd.writeFile(temp_file, updated_temp_file_contents);
+ try ch.writeManifest();
+ }
- {
- var ch = cache.obtain();
- defer ch.deinit();
+ try cwd.writeFile(temp_file, updated_temp_file_contents);
- ch.hash.addBytes("1234");
- const temp_file_idx = try ch.addFile(temp_file, 100);
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- // A file that we depend on has been updated, so the cache should not contain an entry for it
- testing.expectEqual(false, try ch.hit());
+ ch.hash.addBytes("1234");
+ const temp_file_idx = try ch.addFile(temp_file, 100);
- // The cache system does not keep the contents of re-hashed input files.
- testing.expect(ch.files.items[temp_file_idx].contents == null);
+ // A file that we depend on has been updated, so the cache should not contain an entry for it
+ testing.expectEqual(false, try ch.hit());
- digest2 = ch.final();
+ // The cache system does not keep the contents of re-hashed input files.
+ testing.expect(ch.files.items[temp_file_idx].contents == null);
- try ch.writeManifest();
- }
+ digest2 = ch.final();
+
+ try ch.writeManifest();
+ }
- testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
+ testing.expect(!mem.eql(u8, digest1[0..], digest2[0..]));
+ }
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteTree(temp_file);
@@ -749,7 +768,7 @@ test "no file inputs" {
}
const cwd = fs.cwd();
const temp_manifest_dir = "no_file_inputs_manifest_dir";
- defer cwd.deleteTree(temp_manifest_dir) catch unreachable;
+ defer cwd.deleteTree(temp_manifest_dir) catch {};
var digest1: [BASE64_DIGEST_LEN]u8 = undefined;
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
@@ -810,67 +829,69 @@ test "CacheHashes with files added after initial hash work" {
var digest2: [BASE64_DIGEST_LEN]u8 = undefined;
var digest3: [BASE64_DIGEST_LEN]u8 = undefined;
- var cache = Cache{
- .gpa = testing.allocator,
- .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
- };
- defer cache.manifest_dir.close();
-
{
- var ch = cache.obtain();
- defer ch.deinit();
+ var cache = Cache{
+ .gpa = testing.allocator,
+ .manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
+ };
+ defer cache.manifest_dir.close();
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file1, null);
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- // There should be nothing in the cache
- testing.expectEqual(false, try ch.hit());
+ ch.hash.addBytes("1234");
+ _ = try ch.addFile(temp_file1, null);
- _ = try ch.addFilePost(temp_file2);
+ // There should be nothing in the cache
+ testing.expectEqual(false, try ch.hit());
- digest1 = ch.final();
- try ch.writeManifest();
- }
- {
- var ch = cache.obtain();
- defer ch.deinit();
+ _ = try ch.addFilePost(temp_file2);
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file1, null);
+ digest1 = ch.final();
+ try ch.writeManifest();
+ }
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- testing.expect(try ch.hit());
- digest2 = ch.final();
+ ch.hash.addBytes("1234");
+ _ = try ch.addFile(temp_file1, null);
- try ch.writeManifest();
- }
- testing.expect(mem.eql(u8, &digest1, &digest2));
+ testing.expect(try ch.hit());
+ digest2 = ch.final();
- // Modify the file added after initial hash
- const ts2 = std.time.nanoTimestamp();
- try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
+ try ch.writeManifest();
+ }
+ testing.expect(mem.eql(u8, &digest1, &digest2));
- while (isProblematicTimestamp(ts2)) {
- std.time.sleep(1);
- }
+ // Modify the file added after initial hash
+ const ts2 = std.time.nanoTimestamp();
+ try cwd.writeFile(temp_file2, "Hello world the second, updated\n");
- {
- var ch = cache.obtain();
- defer ch.deinit();
+ while (isProblematicTimestamp(ts2)) {
+ std.time.sleep(1);
+ }
- ch.hash.addBytes("1234");
- _ = try ch.addFile(temp_file1, null);
+ {
+ var ch = cache.obtain();
+ defer ch.deinit();
- // A file that we depend on has been updated, so the cache should not contain an entry for it
- testing.expectEqual(false, try ch.hit());
+ ch.hash.addBytes("1234");
+ _ = try ch.addFile(temp_file1, null);
- _ = try ch.addFilePost(temp_file2);
+ // A file that we depend on has been updated, so the cache should not contain an entry for it
+ testing.expectEqual(false, try ch.hit());
- digest3 = ch.final();
+ _ = try ch.addFilePost(temp_file2);
- try ch.writeManifest();
- }
+ digest3 = ch.final();
- testing.expect(!mem.eql(u8, &digest1, &digest3));
+ try ch.writeManifest();
+ }
+
+ testing.expect(!mem.eql(u8, &digest1, &digest3));
+ }
try cwd.deleteTree(temp_manifest_dir);
try cwd.deleteFile(temp_file1);
src-self-hosted/Compilation.zig
@@ -68,7 +68,9 @@ libunwind_static_lib: ?[]const u8 = null,
/// and resolved before calling linker.flush().
libc_static_lib: ?[]const u8 = null,
-/// For example `Scrt1.o` and `libc.so.6`. These are populated after building libc from source,
+glibc_so_files: ?glibc.BuiltSharedObjects = null,
+
+/// For example `Scrt1.o` and `libc_nonshared.a`. These are populated after building libc from source.
/// The set of needed CRT (C runtime) files differs depending on the target and compilation settings.
/// The key is the basename, and the value is the absolute path to the completed build artifact.
crt_files: std.StringHashMapUnmanaged(CRTFile) = .{},
@@ -111,8 +113,8 @@ const WorkItem = union(enum) {
/// one of the glibc static objects
glibc_crt_file: glibc.CRTFile,
- /// one of the glibc shared objects
- glibc_so: *const glibc.Lib,
+ /// all of the glibc shared objects
+ glibc_shared_objects,
};
pub const CObject = struct {
@@ -272,6 +274,9 @@ pub const InitOptions = struct {
version: ?std.builtin.Version = null,
libc_installation: ?*const LibCInstallation = null,
machine_code_model: std.builtin.CodeModel = .default,
+ /// TODO Once self-hosted Zig is capable enough, we can remove this special-case
+ /// hack in favor of more general compilation options.
+ stage1_is_dummy_so: bool = false,
};
pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
@@ -421,6 +426,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
cache.hash.addBytes(options.target.cpu.model.name);
cache.hash.add(options.target.cpu.features.ints);
cache.hash.add(options.target.os.tag);
+ cache.hash.add(options.is_native_os);
cache.hash.add(options.target.abi);
cache.hash.add(ofmt);
cache.hash.add(pic);
@@ -446,22 +452,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
hash.addOptionalBytes(root_pkg.root_src_directory.path);
hash.add(valgrind);
hash.add(single_threaded);
- switch (options.target.os.getVersionRange()) {
- .linux => |linux| {
- hash.add(linux.range.min);
- hash.add(linux.range.max);
- hash.add(linux.glibc);
- },
- .windows => |windows| {
- hash.add(windows.min);
- hash.add(windows.max);
- },
- .semver => |semver| {
- hash.add(semver.min);
- hash.add(semver.max);
- },
- .none => {},
- }
+ hash.add(options.target.os.getVersionRange());
const digest = hash.final();
const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
@@ -660,7 +651,6 @@ pub fn destroy(self: *Compilation) void {
{
var it = self.crt_files.iterator();
while (it.next()) |entry| {
- gpa.free(entry.key);
entry.value.deinit(gpa);
}
self.crt_files.deinit(gpa);
@@ -936,14 +926,15 @@ pub fn performAllTheWork(self: *Compilation) error{OutOfMemory}!void {
},
.glibc_crt_file => |crt_file| {
glibc.buildCRTFile(self, crt_file) catch |err| {
- // This is a problem with the Zig installation. It's mostly OK to crash here,
- // but TODO because it would be even better if we could recover gracefully
- // from temporary problems such as out-of-disk-space.
+ // TODO Expose this as a normal compile error rather than crashing here.
fatal("unable to build glibc CRT file: {}", .{@errorName(err)});
};
},
- .glibc_so => |glibc_lib| {
- fatal("TODO build glibc shared object '{}.so.{}'", .{ glibc_lib.name, glibc_lib.sover });
+ .glibc_shared_objects => {
+ glibc.buildSharedObjects(self) catch |err| {
+ // TODO Expose this as a normal compile error rather than crashing here.
+ fatal("unable to build glibc shared objects: {}", .{@errorName(err)});
+ };
},
};
}
@@ -1587,17 +1578,13 @@ pub fn get_libc_crt_file(comp: *Compilation, arena: *Allocator, basename: []cons
}
fn addBuildingGLibCWorkItems(comp: *Compilation) !void {
- const static_file_work_items = [_]WorkItem{
+ try comp.work_queue.write(&[_]WorkItem{
.{ .glibc_crt_file = .crti_o },
.{ .glibc_crt_file = .crtn_o },
.{ .glibc_crt_file = .scrt1_o },
.{ .glibc_crt_file = .libc_nonshared_a },
- };
- try comp.work_queue.ensureUnusedCapacity(static_file_work_items.len + glibc.libs.len);
- comp.work_queue.writeAssumeCapacity(&static_file_work_items);
- for (glibc.libs) |*glibc_so| {
- comp.work_queue.writeItemAssumeCapacity(.{ .glibc_so = glibc_so });
- }
+ .{ .glibc_shared_objects = {} },
+ });
}
fn wantBuildGLibCFromSource(comp: *Compilation) bool {
src-self-hosted/glibc.zig
@@ -1,11 +1,15 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
-const target_util = @import("target.zig");
const mem = std.mem;
-const Compilation = @import("Compilation.zig");
const path = std.fs.path;
+const assert = std.debug.assert;
+
+const target_util = @import("target.zig");
+const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const trace = @import("tracy.zig").trace;
+const Cache = @import("Cache.zig");
+const Package = @import("Package.zig");
pub const Lib = struct {
name: []const u8,
@@ -83,14 +87,14 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!
};
defer gpa.free(vers_txt_contents);
- const fns_txt_contents = glibc_dir.readFileAlloc(gpa, "fns.txt", max_txt_size) catch |err| switch (err) {
+ // Arena allocated because the result contains references to function names.
+ const fns_txt_contents = glibc_dir.readFileAlloc(arena, "fns.txt", max_txt_size) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
else => {
std.log.err("unable to read fns.txt: {}", .{@errorName(err)});
return error.ZigInstallationCorrupt;
},
};
- defer gpa.free(fns_txt_contents);
const abi_txt_contents = glibc_dir.readFileAlloc(gpa, "abi.txt", max_txt_size) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
@@ -183,7 +187,7 @@ pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!
.os = .linux,
.abi = abi_tag,
};
- try version_table.put(arena, triple, ver_list_base.ptr);
+ try version_table.put(gpa, triple, ver_list_base.ptr);
}
break :blk ver_list_base;
};
@@ -250,7 +254,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile) !void {
}
const gpa = comp.gpa;
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
- errdefer arena_allocator.deinit();
+ defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
switch (crt_file) {
@@ -713,6 +717,252 @@ fn build_crt_file(
});
defer sub_compilation.destroy();
+ try updateSubCompilation(sub_compilation);
+
+ try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
+ const artifact_path = if (sub_compilation.bin_file.options.directory.path) |p|
+ try path.join(comp.gpa, &[_][]const u8{ p, basename })
+ else
+ try comp.gpa.dupe(u8, basename);
+
+ comp.crt_files.putAssumeCapacityNoClobber(basename, .{
+ .full_object_path = artifact_path,
+ .lock = sub_compilation.bin_file.toOwnedLock(),
+ });
+}
+
+pub const BuiltSharedObjects = struct {
+ lock: Cache.Lock,
+ dir_path: []u8,
+
+ pub fn deinit(self: *BuiltSharedObjects, gpa: *Allocator) void {
+ self.lock.release();
+ gpa.free(self.dir_path);
+ self.* = undefined;
+ }
+};
+
+const all_map_basename = "all.map";
+
+pub fn buildSharedObjects(comp: *Compilation) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
+ defer arena_allocator.deinit();
+ const arena = &arena_allocator.allocator;
+
+ const target = comp.getTarget();
+ const target_version = target.os.version_range.linux.glibc;
+
+ // TODO use the global cache directory here
+ var cache_parent: Cache = .{
+ .gpa = comp.gpa,
+ .manifest_dir = comp.cache_parent.manifest_dir,
+ };
+ var cache = cache_parent.obtain();
+ defer cache.deinit();
+ cache.hash.addBytes(build_options.version);
+ cache.hash.addBytes(comp.zig_lib_directory.path orelse ".");
+ cache.hash.add(target.cpu.arch);
+ cache.hash.addBytes(target.cpu.model.name);
+ cache.hash.add(target.cpu.features.ints);
+ cache.hash.add(target.abi);
+ cache.hash.add(target_version);
+
+ const hit = try cache.hit();
+ const digest = cache.final();
+ const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
+ if (!hit) {
+ var o_directory: Compilation.Directory = .{
+ .handle = try comp.zig_cache_directory.handle.makeOpenPath(o_sub_path, .{}),
+ .path = try path.join(arena, &[_][]const u8{ comp.zig_cache_directory.path.?, o_sub_path }),
+ };
+ defer o_directory.handle.close();
+
+ const metadata = try loadMetaData(comp.gpa, comp.zig_lib_directory.handle);
+ defer metadata.destroy(comp.gpa);
+
+ const ver_list_base = metadata.version_table.get(.{
+ .arch = target.cpu.arch,
+ .os = target.os.tag,
+ .abi = target.abi,
+ }) orelse return error.GLibCUnavailableForThisTarget;
+ const target_ver_index = for (metadata.all_versions) |ver, i| {
+ switch (ver.order(target_version)) {
+ .eq => break i,
+ .lt => continue,
+ .gt => {
+ // TODO Expose via compile error mechanism instead of log.
+ std.log.warn("invalid target glibc version: {}", .{target_version});
+ return error.InvalidTargetGLibCVersion;
+ },
+ }
+ } else blk: {
+ const latest_index = metadata.all_versions.len - 1;
+ std.log.warn("zig cannot build new glibc version {}; providing instead {}", .{
+ target_version, metadata.all_versions[latest_index],
+ });
+ break :blk latest_index;
+ };
+ {
+ var map_contents = std.ArrayList(u8).init(arena);
+ for (metadata.all_versions) |ver| {
+ if (ver.patch == 0) {
+ try map_contents.writer().print("GLIBC_{d}.{d} {{ }};\n", .{ ver.major, ver.minor });
+ } else {
+ try map_contents.writer().print("GLIBC_{d}.{d}.{d} {{ }};\n", .{ ver.major, ver.minor, ver.patch });
+ }
+ }
+ try o_directory.handle.writeFile(all_map_basename, map_contents.items);
+ map_contents.deinit(); // The most recent allocation of an arena can be freed :)
+ }
+ var zig_body = std.ArrayList(u8).init(comp.gpa);
+ defer zig_body.deinit();
+ var zig_footer = std.ArrayList(u8).init(comp.gpa);
+ defer zig_footer.deinit();
+ for (libs) |*lib| {
+ zig_body.shrinkRetainingCapacity(0);
+ zig_footer.shrinkRetainingCapacity(0);
+
+ try zig_body.appendSlice(
+ \\comptime {
+ \\ asm (
+ \\
+ );
+ for (metadata.all_functions) |*libc_fn, fn_i| {
+ if (libc_fn.lib != lib) continue;
+
+ const ver_list = ver_list_base[fn_i];
+ // Pick the default symbol version:
+ // - If there are no versions, don't emit it
+ // - Take the greatest one that is <= the target version
+ // - If none of them is <= the target version,
+ // don't pick any default version
+ if (ver_list.len == 0) continue;
+ var chosen_def_ver_index: u8 = 255;
+ {
+ var ver_i: u8 = 0;
+ while (ver_i < ver_list.len) : (ver_i += 1) {
+ const ver_index = ver_list.versions[ver_i];
+ if ((chosen_def_ver_index == 255 or ver_index > chosen_def_ver_index) and
+ target_ver_index >= ver_index)
+ {
+ chosen_def_ver_index = ver_index;
+ }
+ }
+ }
+ {
+ var ver_i: u8 = 0;
+ while (ver_i < ver_list.len) : (ver_i += 1) {
+ const ver_index = ver_list.versions[ver_i];
+ const ver = metadata.all_versions[ver_index];
+ const sym_name = libc_fn.name;
+ const stub_name = if (ver.patch == 0)
+ try std.fmt.allocPrint(arena, "{s}_{d}_{d}", .{ sym_name, ver.major, ver.minor })
+ else
+ try std.fmt.allocPrint(arena, "{s}_{d}_{d}_{d}", .{ sym_name, ver.major, ver.minor, ver.patch });
+
+ try zig_footer.writer().print("export fn {s}() void {{}}\n", .{stub_name});
+
+ // Default symbol version definition vs normal symbol version definition
+ const want_two_ats = chosen_def_ver_index != 255 and ver_index == chosen_def_ver_index;
+ const at_sign_str = "@@"[0 .. @boolToInt(want_two_ats) + @as(usize, 1)];
+ if (ver.patch == 0) {
+ try zig_body.writer().print(" \\\\ .symver {s}, {s}{s}GLIBC_{d}.{d}\n", .{
+ stub_name, sym_name, at_sign_str, ver.major, ver.minor,
+ });
+ } else {
+ try zig_body.writer().print(" \\\\ .symver {s}, {s}{s}GLIBC_{d}.{d}.{d}\n", .{
+ stub_name, sym_name, at_sign_str, ver.major, ver.minor, ver.patch,
+ });
+ }
+ // Hide the stub to keep the symbol table clean
+ try zig_body.writer().print(" \\\\ .hidden {s}\n", .{stub_name});
+ }
+ }
+ }
+
+ try zig_body.appendSlice(
+ \\ );
+ \\}
+ \\
+ );
+ try zig_body.appendSlice(zig_footer.items);
+
+ var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
+ const zig_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.zig", .{lib.name}) catch unreachable;
+ try o_directory.handle.writeFile(zig_file_basename, zig_body.items);
+
+ try buildSharedLib(comp, arena, comp.zig_cache_directory, o_directory, zig_file_basename, lib);
+ }
+ cache.writeManifest() catch |err| {
+ std.log.warn("glibc shared objects: failed to write cache manifest: {}", .{@errorName(err)});
+ };
+ }
+
+ assert(comp.glibc_so_files == null);
+ comp.glibc_so_files = BuiltSharedObjects{
+ .lock = cache.toOwnedLock(),
+ .dir_path = try path.join(comp.gpa, &[_][]const u8{ comp.zig_cache_directory.path.?, o_sub_path }),
+ };
+}
+
+fn buildSharedLib(
+ comp: *Compilation,
+ arena: *Allocator,
+ zig_cache_directory: Compilation.Directory,
+ bin_directory: Compilation.Directory,
+ zig_file_basename: []const u8,
+ lib: *const Lib,
+) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const emit_bin = Compilation.EmitLoc{
+ .directory = bin_directory,
+ .basename = try std.fmt.allocPrint(arena, "lib{s}.so.{d}", .{ lib.name, lib.sover }),
+ };
+ const version: std.builtin.Version = .{ .major = lib.sover, .minor = 0, .patch = 0 };
+ const ld_basename = path.basename(comp.getTarget().standardDynamicLinkerPath().get().?);
+ const override_soname = if (mem.eql(u8, lib.name, "ld")) ld_basename else null;
+ const map_file_path = try path.join(arena, &[_][]const u8{ bin_directory.path.?, all_map_basename });
+ // TODO we should be able to just give the open directory to Package
+ const root_pkg = try Package.create(comp.gpa, std.fs.cwd(), bin_directory.path.?, zig_file_basename);
+ defer root_pkg.destroy(comp.gpa);
+ const sub_compilation = try Compilation.create(comp.gpa, .{
+ .zig_cache_directory = zig_cache_directory,
+ .zig_lib_directory = comp.zig_lib_directory,
+ .target = comp.getTarget(),
+ .root_name = lib.name,
+ .root_pkg = null,
+ .output_mode = .Lib,
+ .link_mode = .Dynamic,
+ .rand = comp.rand,
+ .libc_installation = comp.bin_file.options.libc_installation,
+ .emit_bin = emit_bin,
+ .optimize_mode = comp.bin_file.options.optimize_mode,
+ .want_sanitize_c = false,
+ .want_stack_check = false,
+ .want_valgrind = false,
+ .emit_h = null,
+ .strip = comp.bin_file.options.strip,
+ .is_native_os = false,
+ .self_exe_path = comp.self_exe_path,
+ .debug_cc = comp.debug_cc,
+ .debug_link = comp.bin_file.options.debug_link,
+ .clang_passthrough_mode = comp.clang_passthrough_mode,
+ .version = version,
+ .stage1_is_dummy_so = true,
+ .version_script = map_file_path,
+ .override_soname = override_soname,
+ });
+ defer sub_compilation.destroy();
+
+ try updateSubCompilation(sub_compilation);
+}
+
+fn updateSubCompilation(sub_compilation: *Compilation) !void {
try sub_compilation.update();
// Look for compilation errors in this sub_compilation
@@ -730,15 +980,4 @@ fn build_crt_file(
}
return error.BuildingLibCObjectFailed;
}
-
- try comp.crt_files.ensureCapacity(comp.gpa, comp.crt_files.count() + 1);
- const artifact_path = if (sub_compilation.bin_file.options.directory.path) |p|
- try std.fs.path.join(comp.gpa, &[_][]const u8{ p, basename })
- else
- try comp.gpa.dupe(u8, basename);
-
- comp.crt_files.putAssumeCapacityNoClobber(basename, .{
- .full_object_path = artifact_path,
- .lock = sub_compilation.bin_file.toOwnedLock(),
- });
}
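For illustration (hypothetical symbol and versions, not taken from the commit): suppose a libc function foo is exported at GLIBC_2.2.5 and GLIBC_2.17 and the target glibc is 2.28. The stub-generation loop in buildSharedObjects above would pick 2.17 as the default binding (the greatest version not exceeding the target, marked `@@`), give older versions a single `@`, and hide every stub so it does not pollute the symbol table. The generated c.zig would look roughly like:

    comptime {
        asm (
            \\ .symver foo_2_2_5, foo@GLIBC_2.2.5
            \\ .hidden foo_2_2_5
            \\ .symver foo_2_17, foo@@GLIBC_2.17
            \\ .hidden foo_2_17
        );
    }
    export fn foo_2_2_5() void {}
    export fn foo_2_17() void {}

and all.map, passed to the link step as the version script, would hold one empty node per known glibc version:

    GLIBC_2.0 { };
    GLIBC_2.1 { };
    ...
    GLIBC_2.28 { };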
BRANCH_TODO
@@ -1,4 +1,7 @@
* glibc .so files
+ - stage1 C++ code integration
+ - ok file
+ * use hex for cache hash file paths
* support rpaths in ELF linker code
* build & link against compiler-rt
* build & link against freestanding libc
@@ -21,7 +24,6 @@
* implement proper parsing of LLD stderr/stdout and exposing compile errors
* implement proper parsing of clang stderr/stdout and exposing compile errors
* implement proper compile errors for failing to build glibc crt files and shared libs
- * skip LLD caching when bin directory is not in the cache (so we don't put `id.txt` into the cwd)
* self-host link.cpp and building libcs (#4313 and #4314). using the `zig cc` command will set a flag indicating a preference for the llvm backend, which will include linking with LLD. At least for now. If zig's self-hosted linker ever gets on par with the likes of ld and lld, we can make it always be used even for zig cc.
* improve the stage2 tests to support testing with LLVM extensions enabled
* multi-thread building C objects
@@ -47,3 +49,10 @@
* libc_installation.zig: make it look for msvc only if msvc abi is chosen
* switch the default C ABI for windows to be mingw-w64
* port windows_sdk.cpp to zig
+ * change glibc log errors to normal exposed compile errors
+ * update Package to use Compilation.Directory in create()
+ - skip LLD caching when bin directory is not in the cache (so we don't put `id.txt` into the cwd)
+ (maybe make it an explicit option and have main.zig disable it)
+ - make it possible for Package to not openDir and reference already existing resources.
+ * rename src/ to src/stage1/
+ * rename src-self-hosted/ to src/