Commit 37a9a4e0f1
Changed files (49)
src/arch/aarch64/CodeGen.zig
@@ -333,7 +333,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const fn_type = Type.fromInterned(func.ty);
const file_scope = zcu.navFileScope(func.owner_nav);
- const target = &file_scope.mod.resolved_target.result;
+ const target = &file_scope.mod.?.resolved_target.result;
var branch_stack = std.ArrayList(Branch).init(gpa);
defer {
src/arch/arm/CodeGen.zig
@@ -342,7 +342,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const func_ty = Type.fromInterned(func.ty);
const file_scope = zcu.navFileScope(func.owner_nav);
- const target = &file_scope.mod.resolved_target.result;
+ const target = &file_scope.mod.?.resolved_target.result;
var branch_stack = std.ArrayList(Branch).init(gpa);
defer {
src/arch/riscv64/CodeGen.zig
@@ -767,7 +767,7 @@ pub fn generate(
const ip = &zcu.intern_pool;
const func = zcu.funcInfo(func_index);
const fn_type = Type.fromInterned(func.ty);
- const mod = zcu.navFileScope(func.owner_nav).mod;
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
var branch_stack = std.ArrayList(Branch).init(gpa);
defer {
src/arch/sparc64/CodeGen.zig
@@ -275,7 +275,7 @@ pub fn generate(
const func = zcu.funcInfo(func_index);
const func_ty = Type.fromInterned(func.ty);
const file_scope = zcu.navFileScope(func.owner_nav);
- const target = &file_scope.mod.resolved_target.result;
+ const target = &file_scope.mod.?.resolved_target.result;
var branch_stack = std.ArrayList(Branch).init(gpa);
defer {
src/arch/wasm/CodeGen.zig
@@ -1268,7 +1268,7 @@ pub fn function(
const gpa = zcu.gpa;
const cg = zcu.funcInfo(func_index);
const file_scope = zcu.navFileScope(cg.owner_nav);
- const target = &file_scope.mod.resolved_target.result;
+ const target = &file_scope.mod.?.resolved_target.result;
const fn_ty = zcu.navValue(cg.owner_nav).typeOf(zcu);
const fn_info = zcu.typeToFunc(fn_ty).?;
const ip = &zcu.intern_pool;
src/arch/x86_64/CodeGen.zig
@@ -892,7 +892,7 @@ pub fn generate(
const ip = &zcu.intern_pool;
const func = zcu.funcInfo(func_index);
const fn_type: Type = .fromInterned(func.ty);
- const mod = zcu.navFileScope(func.owner_nav).mod;
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
var function: CodeGen = .{
.gpa = gpa,
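The change common to all of the self-hosted backends above is mechanical: Zcu.File.mod has become an optional pointer (presumably because a file can now exist without an owning module), so every code path that is only reachable for module-owned files asserts its presence with `.?`. A minimal sketch of the pattern, using simplified stand-in types rather than the compiler's real ones:

const std = @import("std");

const Module = struct { single_threaded: bool };

const File = struct {
    // Previously a plain pointer; now optional, so callers that require an
    // owning module unwrap it explicitly.
    mod: ?*const Module,
};

fn isSingleThreaded(file: File) bool {
    // Mirrors `zcu.navFileScope(nav).mod.?.single_threaded` in the diff;
    // unwrapping null is a checked panic in safe builds.
    return file.mod.?.single_threaded;
}

test "unwrap owning module" {
    const mod: Module = .{ .single_threaded = false };
    try std.testing.expect(!isSingleThreaded(.{ .mod = &mod }));
}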
src/codegen/c.zig
@@ -2670,7 +2670,7 @@ pub fn genTypeDecl(
_ = try renderTypePrefix(.flush, global_ctype_pool, zcu, writer, global_ctype, .suffix, .{});
try writer.writeByte(';');
const file_scope = ty.typeDeclInstAllowGeneratedTag(zcu).?.resolveFile(ip);
- if (!zcu.fileByIndex(file_scope).mod.strip) try writer.print(" /* {} */", .{
+ if (!zcu.fileByIndex(file_scope).mod.?.strip) try writer.print(" /* {} */", .{
ty.containerTypeName(ip).fmt(ip),
});
try writer.writeByte('\n');
src/codegen/llvm.zig
@@ -587,13 +587,8 @@ pub const Object = struct {
// into the garbage can by converting into absolute paths. What
// a terrible tragedy.
const compile_unit_dir = blk: {
- if (comp.zcu) |zcu| m: {
- const d = try zcu.main_mod.root.joinString(arena, "");
- if (d.len == 0) break :m;
- if (std.fs.path.isAbsolute(d)) break :blk d;
- break :blk std.fs.realpathAlloc(arena, d) catch break :blk d;
- }
- break :blk try std.process.getCwdAlloc(arena);
+ const zcu = comp.zcu orelse break :blk comp.dirs.cwd;
+ break :blk try zcu.main_mod.root.toAbsolute(comp.dirs, arena);
};
const debug_file = try builder.debugFile(
@@ -1135,7 +1130,7 @@ pub const Object = struct {
const func = zcu.funcInfo(func_index);
const nav = ip.getNav(func.owner_nav);
const file_scope = zcu.navFileScopeIndex(func.owner_nav);
- const owner_mod = zcu.fileByIndex(file_scope).mod;
+ const owner_mod = zcu.fileByIndex(file_scope).mod.?;
const fn_ty = Type.fromInterned(func.ty);
const fn_info = zcu.typeToFunc(fn_ty).?;
const target = owner_mod.resolved_target.result;
@@ -1735,20 +1730,14 @@ pub const Object = struct {
const gop = try o.debug_file_map.getOrPut(gpa, file_index);
errdefer assert(o.debug_file_map.remove(file_index));
if (gop.found_existing) return gop.value_ptr.*;
- const file = o.pt.zcu.fileByIndex(file_index);
+ const zcu = o.pt.zcu;
+ const path = zcu.fileByIndex(file_index).path;
+ const abs_path = try path.toAbsolute(zcu.comp.dirs, gpa);
+ defer gpa.free(abs_path);
+
gop.value_ptr.* = try o.builder.debugFile(
- try o.builder.metadataString(std.fs.path.basename(file.sub_file_path)),
- dir_path: {
- const sub_path = std.fs.path.dirname(file.sub_file_path) orelse "";
- const dir_path = try file.mod.root.joinString(gpa, sub_path);
- defer gpa.free(dir_path);
- if (std.fs.path.isAbsolute(dir_path))
- break :dir_path try o.builder.metadataString(dir_path);
- var abs_buffer: [std.fs.max_path_bytes]u8 = undefined;
- const abs_path = std.fs.realpath(dir_path, &abs_buffer) catch
- break :dir_path try o.builder.metadataString(dir_path);
- break :dir_path try o.builder.metadataString(abs_path);
- },
+ try o.builder.metadataString(std.fs.path.basename(abs_path)),
+ try o.builder.metadataString(std.fs.path.dirname(abs_path) orelse ""),
);
return gop.value_ptr.*;
}
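The getDebugFile rewrite above replaces the hand-rolled directory computation (joinString on the module root, then a realpath fallback) with a single path.toAbsolute(zcu.comp.dirs, gpa) call and splits the result into a DWARF basename and directory. The real Cache.Path type is not shown in this hunk, so the following is only an assumed sketch of what such a helper does: anchor a possibly-relative root at an absolute cwd and join the sub-path onto it.

// Assumed sketch; the compiler's actual Path/Directories API may differ.
const std = @import("std");

const Path = struct {
    root_dir: ?[]const u8, // null means "relative to the current working directory"
    sub_path: []const u8,

    fn toAbsolute(p: Path, cwd_abs: []const u8, arena: std.mem.Allocator) ![]u8 {
        const base = p.root_dir orelse cwd_abs;
        if (std.fs.path.isAbsolute(base))
            return std.fs.path.join(arena, &.{ base, p.sub_path });
        // A relative root gets anchored at the already-absolute cwd.
        return std.fs.path.join(arena, &.{ cwd_abs, base, p.sub_path });
    }
};

test "toAbsolute anchors relative roots" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();
    const p: Path = .{ .root_dir = "lib", .sub_path = "std/std.zig" };
    const abs = try p.toAbsolute("/home/user/zig", arena_state.allocator());
    try std.testing.expect(std.fs.path.isAbsolute(abs));
}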
@@ -2646,11 +2635,9 @@ pub const Object = struct {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
- const std_mod = zcu.std_mod;
- const std_file_imported = pt.importPkg(std_mod) catch unreachable;
-
+ const std_file_index = zcu.module_roots.get(zcu.std_mod).?.unwrap().?;
const builtin_str = try ip.getOrPutString(zcu.gpa, pt.tid, "builtin", .no_embedded_nulls);
- const std_file_root_type = Type.fromInterned(zcu.fileRootType(std_file_imported.file_index));
+ const std_file_root_type = Type.fromInterned(zcu.fileRootType(std_file_index));
const std_namespace = ip.namespacePtr(std_file_root_type.getNamespaceIndex(zcu));
const builtin_nav = std_namespace.pub_decls.getKeyAdapted(builtin_str, Zcu.Namespace.NameAdapter{ .zcu = zcu }).?;
@@ -2683,7 +2670,7 @@ pub const Object = struct {
const ip = &zcu.intern_pool;
const gpa = o.gpa;
const nav = ip.getNav(nav_index);
- const owner_mod = zcu.navFileScope(nav_index).mod;
+ const owner_mod = zcu.navFileScope(nav_index).mod.?;
const ty: Type = .fromInterned(nav.typeOf(ip));
const gop = try o.nav_map.getOrPut(gpa, nav_index);
if (gop.found_existing) return gop.value_ptr.ptr(&o.builder).kind.function;
@@ -3013,7 +3000,7 @@ pub const Object = struct {
if (is_extern) {
variable_index.setLinkage(.external, &o.builder);
variable_index.setUnnamedAddr(.default, &o.builder);
- if (is_threadlocal and !zcu.navFileScope(nav_index).mod.single_threaded)
+ if (is_threadlocal and !zcu.navFileScope(nav_index).mod.?.single_threaded)
variable_index.setThreadLocal(.generaldynamic, &o.builder);
if (is_weak_linkage) variable_index.setLinkage(.extern_weak, &o.builder);
if (is_dll_import) variable_index.setDllStorageClass(.dllimport, &o.builder);
@@ -4514,7 +4501,7 @@ pub const NavGen = struct {
err_msg: ?*Zcu.ErrorMsg,
fn ownerModule(ng: NavGen) *Package.Module {
- return ng.object.pt.zcu.navFileScope(ng.nav_index).mod;
+ return ng.object.pt.zcu.navFileScope(ng.nav_index).mod.?;
}
fn todo(ng: *NavGen, comptime format: []const u8, args: anytype) Error {
@@ -4557,7 +4544,7 @@ pub const NavGen = struct {
}, &o.builder);
const file_scope = zcu.navFileScopeIndex(nav_index);
- const mod = zcu.fileByIndex(file_scope).mod;
+ const mod = zcu.fileByIndex(file_scope).mod.?;
if (is_threadlocal and !mod.single_threaded)
variable_index.setThreadLocal(.generaldynamic, &o.builder);
@@ -5121,7 +5108,7 @@ pub const FuncGen = struct {
const func = zcu.funcInfo(inline_func);
const nav = ip.getNav(func.owner_nav);
const file_scope = zcu.navFileScopeIndex(func.owner_nav);
- const mod = zcu.fileByIndex(file_scope).mod;
+ const mod = zcu.fileByIndex(file_scope).mod.?;
self.file = try o.getDebugFile(file_scope);
src/codegen/spirv.zig
@@ -201,7 +201,7 @@ pub const Object = struct {
) !void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const structured_cfg = zcu.navFileScope(nav_index).mod.structured_cfg;
+ const structured_cfg = zcu.navFileScope(nav_index).mod.?.structured_cfg;
var nav_gen = NavGen{
.gpa = gpa,
src/libs/freebsd.zig
@@ -34,7 +34,7 @@ pub fn needsCrt0(output_mode: std.builtin.OutputMode) ?CrtFile {
fn includePath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &.{
- comp.zig_lib_directory.path.?,
+ comp.dirs.zig_lib.path.?,
"libc" ++ path.sep_str ++ "include",
sub_path,
});
@@ -42,7 +42,7 @@ fn includePath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]co
fn csuPath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &.{
- comp.zig_lib_directory.path.?,
+ comp.dirs.zig_lib.path.?,
"libc" ++ path.sep_str ++ "freebsd" ++ path.sep_str ++ "lib" ++ path.sep_str ++ "csu",
sub_path,
});
@@ -50,7 +50,7 @@ fn csuPath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const
fn libcPath(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
return path.join(arena, &.{
- comp.zig_lib_directory.path.?,
+ comp.dirs.zig_lib.path.?,
"libc" ++ path.sep_str ++ "freebsd" ++ path.sep_str ++ "lib" ++ path.sep_str ++ "libc",
sub_path,
});
@@ -438,11 +438,11 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// Use the global cache directory.
var cache: Cache = .{
.gpa = gpa,
- .manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
- cache.addPrefix(comp.zig_lib_directory);
- cache.addPrefix(comp.global_cache_directory);
+ cache.addPrefix(comp.dirs.zig_lib);
+ cache.addPrefix(comp.dirs.global_cache);
defer cache.manifest_dir.close();
var man = cache.obtain();
@@ -452,7 +452,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
man.hash.add(target.abi);
man.hash.add(target_version);
- const full_abilists_path = try comp.zig_lib_directory.join(arena, &.{abilists_path});
+ const full_abilists_path = try comp.dirs.zig_lib.join(arena, &.{abilists_path});
const abilists_index = try man.addFile(full_abilists_path, abilists_max_size);
if (try man.hit()) {
@@ -461,7 +461,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
.dir_path = .{
- .root_dir = comp.global_cache_directory,
+ .root_dir = comp.dirs.global_cache,
.sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
},
});
@@ -470,9 +470,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const digest = man.final();
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
- var o_directory: Compilation.Directory = .{
- .handle = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{}),
- .path = try comp.global_cache_directory.join(arena, &.{o_sub_path}),
+ var o_directory: Cache.Directory = .{
+ .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
defer o_directory.handle.close();
@@ -974,7 +974,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "stdthreads", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
- try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib, prog_node);
+ try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
man.writeManifest() catch |err| {
@@ -984,7 +984,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
.dir_path = .{
- .root_dir = comp.global_cache_directory,
+ .root_dir = comp.dirs.global_cache,
.sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
},
});
@@ -1023,8 +1023,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
fn buildSharedLib(
comp: *Compilation,
arena: Allocator,
- zig_cache_directory: Compilation.Directory,
- bin_directory: Compilation.Directory,
+ bin_directory: Cache.Directory,
asm_file_basename: []const u8,
lib: Lib,
prog_node: std.Progress.Node,
@@ -1057,9 +1056,8 @@ fn buildSharedLib(
});
const root_mod = try Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -1079,8 +1077,6 @@ fn buildSharedLib(
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
});
const c_source_files = [1]Compilation.CSourceFile{
@@ -1091,9 +1087,7 @@ fn buildSharedLib(
};
const sub_compilation = try Compilation.create(comp.gpa, arena, .{
- .local_cache_directory = zig_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .incremental,
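The other recurring change in this file (and in glibc.zig, libcxx.zig, libtsan.zig, libunwind.zig, and musl.zig below) is that the trio of local_cache_directory/global_cache_directory/zig_lib_directory arguments threaded into every sub-compilation collapses into a single comp.dirs value, with comp.dirs.withoutLocalCache() standing in for the old "use the global cache as the local cache" idiom. The field set and helper below are inferred from the call sites in this diff, not copied from the compiler:

// Inferred sketch; `Directory` stands in for Cache.Directory.
const Directory = struct { path: ?[]const u8 };

const Directories = struct {
    cwd: []const u8,
    zig_lib: Directory,
    global_cache: Directory,
    local_cache: Directory,

    /// Sub-compilations for runtime libraries cache their output globally,
    /// so the global cache doubles as their "local" cache.
    fn withoutLocalCache(dirs: Directories) Directories {
        var result = dirs;
        result.local_cache = dirs.global_cache;
        return result;
    }
};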
src/libs/glibc.zig
@@ -365,7 +365,7 @@ fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![
const s = path.sep_str;
var result = std.ArrayList(u8).init(arena);
- try result.appendSlice(comp.zig_lib_directory.path.?);
+ try result.appendSlice(comp.dirs.zig_lib.path orelse ".");
try result.appendSlice(s ++ "libc" ++ s ++ "glibc" ++ s ++ "sysdeps" ++ s);
if (is_sparc) {
if (is_64) {
@@ -439,7 +439,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
}
if (opt_nptl) |nptl| {
try args.append("-I");
- try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc_glibc ++ "sysdeps", nptl }));
+ try args.append(try path.join(arena, &.{ comp.dirs.zig_lib.path orelse ".", lib_libc_glibc ++ "sysdeps", nptl }));
}
try args.append("-I");
@@ -459,11 +459,11 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
try args.append(try lib_path(comp, arena, lib_libc_glibc ++ "sysdeps" ++ s ++ "generic"));
try args.append("-I");
- try args.append(try path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, lib_libc ++ "glibc" }));
+ try args.append(try path.join(arena, &[_][]const u8{ comp.dirs.zig_lib.path orelse ".", lib_libc ++ "glibc" }));
try args.append("-I");
try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", .{
- comp.zig_lib_directory.path.?, @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi),
+ comp.dirs.zig_lib.path orelse ".", @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi),
}));
try args.append("-I");
@@ -472,7 +472,7 @@ fn add_include_dirs(comp: *Compilation, arena: Allocator, args: *std.ArrayList([
const arch_name = std.zig.target.osArchName(target);
try args.append("-I");
try args.append(try std.fmt.allocPrint(arena, "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-linux-any", .{
- comp.zig_lib_directory.path.?, arch_name,
+ comp.dirs.zig_lib.path orelse ".", arch_name,
}));
try args.append("-I");
@@ -626,15 +626,11 @@ fn add_include_dirs_arch(
}
}
-fn path_from_lib(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
- return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
-}
-
const lib_libc = "libc" ++ path.sep_str;
const lib_libc_glibc = lib_libc ++ "glibc" ++ path.sep_str;
fn lib_path(comp: *Compilation, arena: Allocator, sub_path: []const u8) ![]const u8 {
- return path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, sub_path });
+ return path.join(arena, &.{ comp.dirs.zig_lib.path orelse ".", sub_path });
}
pub const BuiltSharedObjects = struct {
@@ -678,11 +674,11 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
// Use the global cache directory.
var cache: Cache = .{
.gpa = gpa,
- .manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
- cache.addPrefix(comp.zig_lib_directory);
- cache.addPrefix(comp.global_cache_directory);
+ cache.addPrefix(comp.dirs.zig_lib);
+ cache.addPrefix(comp.dirs.global_cache);
defer cache.manifest_dir.close();
var man = cache.obtain();
@@ -692,7 +688,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
man.hash.add(target.abi);
man.hash.add(target_version);
- const full_abilists_path = try comp.zig_lib_directory.join(arena, &.{abilists_path});
+ const full_abilists_path = try comp.dirs.zig_lib.join(arena, &.{abilists_path});
const abilists_index = try man.addFile(full_abilists_path, abilists_max_size);
if (try man.hit()) {
@@ -701,7 +697,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
.dir_path = .{
- .root_dir = comp.global_cache_directory,
+ .root_dir = comp.dirs.global_cache,
.sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
},
});
@@ -710,9 +706,9 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
const digest = man.final();
const o_sub_path = try path.join(arena, &[_][]const u8{ "o", &digest });
- var o_directory: Compilation.Directory = .{
- .handle = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{}),
- .path = try comp.global_cache_directory.join(arena, &.{o_sub_path}),
+ var o_directory: Cache.Directory = .{
+ .handle = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{}),
+ .path = try comp.dirs.global_cache.join(arena, &.{o_sub_path}),
};
defer o_directory.handle.close();
@@ -1112,7 +1108,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
var lib_name_buf: [32]u8 = undefined; // Larger than each of the names "c", "pthread", etc.
const asm_file_basename = std.fmt.bufPrint(&lib_name_buf, "{s}.s", .{lib.name}) catch unreachable;
try o_directory.handle.writeFile(.{ .sub_path = asm_file_basename, .data = stubs_asm.items });
- try buildSharedLib(comp, arena, comp.global_cache_directory, o_directory, asm_file_basename, lib, prog_node);
+ try buildSharedLib(comp, arena, o_directory, asm_file_basename, lib, prog_node);
}
man.writeManifest() catch |err| {
@@ -1122,7 +1118,7 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
return queueSharedObjects(comp, .{
.lock = man.toOwnedLock(),
.dir_path = .{
- .root_dir = comp.global_cache_directory,
+ .root_dir = comp.dirs.global_cache,
.sub_path = try gpa.dupe(u8, "o" ++ fs.path.sep_str ++ digest),
},
});
@@ -1174,8 +1170,7 @@ fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
fn buildSharedLib(
comp: *Compilation,
arena: Allocator,
- zig_cache_directory: Compilation.Directory,
- bin_directory: Compilation.Directory,
+ bin_directory: Cache.Directory,
asm_file_basename: []const u8,
lib: Lib,
prog_node: std.Progress.Node,
@@ -1208,9 +1203,8 @@ fn buildSharedLib(
});
const root_mod = try Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -1230,8 +1224,6 @@ fn buildSharedLib(
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
});
const c_source_files = [1]Compilation.CSourceFile{
@@ -1242,9 +1234,7 @@ fn buildSharedLib(
};
const sub_compilation = try Compilation.create(comp.gpa, arena, .{
- .local_cache_directory = zig_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .incremental,
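A smaller but repeated change in the glibc hunks above: include paths built from the zig lib directory no longer assert a textual path with `.path.?` but fall back to "." via orelse, matching the Cache.Directory convention that a null path means the current working directory. A tiny sketch of the difference:

const std = @import("std");

fn includeDir(arena: std.mem.Allocator, zig_lib_path: ?[]const u8, sub: []const u8) ![]u8 {
    // Old: `zig_lib_path.?` panics (in safe builds) when the lib dir is the cwd
    // and therefore has no textual path. New: treat that case as ".".
    return std.fs.path.join(arena, &.{ zig_lib_path orelse ".", sub });
}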
src/libs/libcxx.zig
@@ -134,10 +134,10 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
.basename = basename,
};
- const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
- const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
- const cxx_src_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "src" });
- const cxx_libc_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "libc" });
+ const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
+ const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
+ const cxx_src_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "src" });
+ const cxx_libc_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "libc" });
const optimize_mode = comp.compilerRtOptMode();
const strip = comp.compilerRtStrip();
@@ -164,9 +164,8 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
};
const root_mod = Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -188,8 +187,6 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
}) catch |err| {
comp.setMiscFailure(
.libcxx,
@@ -258,7 +255,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
try cache_exempt_flags.append(cxx_libc_include_path);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", cxx_src }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", cxx_src }),
.extra_flags = cflags.items,
.cache_exempt_flags = cache_exempt_flags.items,
.owner = root_mod,
@@ -266,9 +263,7 @@ pub fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) BuildError!
}
const sub_compilation = Compilation.create(comp.gpa, arena, .{
- .local_cache_directory = comp.global_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
@@ -344,9 +339,9 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
.basename = basename,
};
- const cxxabi_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", "include" });
- const cxx_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "include" });
- const cxx_src_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", "src" });
+ const cxxabi_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", "include" });
+ const cxx_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "include" });
+ const cxx_src_include_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxx", "src" });
const optimize_mode = comp.compilerRtOptMode();
const strip = comp.compilerRtStrip();
@@ -378,9 +373,8 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
};
const root_mod = Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -403,8 +397,6 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
}) catch |err| {
comp.setMiscFailure(
.libcxxabi,
@@ -459,7 +451,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
try cache_exempt_flags.append(cxx_src_include_path);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", cxxabi_src }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libcxxabi", cxxabi_src }),
.extra_flags = cflags.items,
.cache_exempt_flags = cache_exempt_flags.items,
.owner = root_mod,
@@ -467,9 +459,7 @@ pub fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
}
const sub_compilation = Compilation.create(comp.gpa, arena, .{
- .local_cache_directory = comp.global_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
src/libs/libtsan.zig
@@ -84,9 +84,8 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
};
const root_mod = Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -110,8 +109,6 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
.global = config,
.cc_argv = &common_flags,
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
}) catch |err| {
comp.setMiscFailure(
.libtsan,
@@ -124,7 +121,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
var c_source_files = std.ArrayList(Compilation.CSourceFile).init(arena);
try c_source_files.ensureUnusedCapacity(tsan_sources.len);
- const tsan_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{"libtsan"});
+ const tsan_include_path = try comp.dirs.zig_lib.join(arena, &.{"libtsan"});
for (tsan_sources) |tsan_src| {
var cflags = std.ArrayList([]const u8).init(arena);
@@ -134,7 +131,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try addCcArgs(target, &cflags);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &.{ "libtsan", tsan_src }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libtsan", tsan_src }),
.extra_flags = cflags.items,
.owner = root_mod,
});
@@ -155,7 +152,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try addCcArgs(target, &cflags);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libtsan", tsan_src }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libtsan", tsan_src }),
.extra_flags = cflags.items,
.owner = root_mod,
});
@@ -179,14 +176,14 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try cflags.append("-DNDEBUG");
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libtsan", asm_source }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libtsan", asm_source }),
.extra_flags = cflags.items,
.owner = root_mod,
});
}
try c_source_files.ensureUnusedCapacity(sanitizer_common_sources.len);
- const sanitizer_common_include_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ const sanitizer_common_include_path = try comp.dirs.zig_lib.join(arena, &.{
"libtsan", "sanitizer_common",
});
for (sanitizer_common_sources) |common_src| {
@@ -200,7 +197,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try addCcArgs(target, &cflags);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libtsan", "sanitizer_common", common_src,
}),
.extra_flags = cflags.items,
@@ -224,7 +221,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try addCcArgs(target, &cflags);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libtsan", "sanitizer_common", c_src,
}),
.extra_flags = cflags.items,
@@ -242,7 +239,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try addCcArgs(target, &cflags);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libtsan", "sanitizer_common", c_src,
}),
.extra_flags = cflags.items,
@@ -250,10 +247,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
});
}
- const interception_include_path = try comp.zig_lib_directory.join(
- arena,
- &[_][]const u8{"interception"},
- );
+ const interception_include_path = try comp.dirs.zig_lib.join(arena, &.{"interception"});
try c_source_files.ensureUnusedCapacity(interception_sources.len);
for (interception_sources) |c_src| {
@@ -268,7 +262,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
try addCcArgs(target, &cflags);
c_source_files.appendAssumeCapacity(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libtsan", "interception", c_src,
}),
.extra_flags = cflags.items,
@@ -285,9 +279,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: std.Progress.Node) BuildError!vo
// Workaround for https://github.com/llvm/llvm-project/issues/97627
const headerpad_size: ?u32 = if (target.os.tag.isDarwin()) 32 else null;
const sub_compilation = Compilation.create(comp.gpa, arena, .{
- .local_cache_directory = comp.global_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.thread_pool = comp.thread_pool,
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
src/libs/libunwind.zig
@@ -50,9 +50,8 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
return error.SubCompilationFailed;
};
const root_mod = Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -76,8 +75,6 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
}) catch |err| {
comp.setMiscFailure(
.libunwind,
@@ -118,7 +115,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
else => unreachable, // See `unwind_src_list`.
}
try cflags.append("-I");
- try cflags.append(try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libunwind", "include" }));
+ try cflags.append(try comp.dirs.zig_lib.join(arena, &.{ "libunwind", "include" }));
try cflags.append("-D_LIBUNWIND_HIDE_SYMBOLS");
try cflags.append("-Wa,--noexecstack");
try cflags.append("-fvisibility=hidden");
@@ -148,16 +145,14 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: std.Progress.Node) BuildErr
}
c_source_files[i] = .{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{unwind_src}),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{unwind_src}),
.extra_flags = cflags.items,
.owner = root_mod,
};
}
const sub_compilation = Compilation.create(comp.gpa, arena, .{
+ .dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
- .local_cache_directory = comp.global_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
.config = config,
.root_mod = root_mod,
.cache_mode = .whole,
src/libs/mingw.zig
@@ -40,7 +40,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
}
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", "crt", "crtexe.c",
}),
.extra_flags = args.items,
@@ -57,7 +57,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try addCrtCcArgs(comp, arena, &args);
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", "crt", "crtdll.c",
}),
.extra_flags = args.items,
@@ -78,7 +78,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (mingw32_generic_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -88,7 +88,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
if (target.cpu.arch.isX86()) {
for (mingw32_x86_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -98,7 +98,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
if (target.cpu.arch == .x86) {
for (mingw32_x86_32_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -109,7 +109,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
} else if (target.cpu.arch == .thumb) {
for (mingw32_arm_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -118,7 +118,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
}
for (mingw32_arm32_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -128,7 +128,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
} else if (target.cpu.arch == .aarch64) {
for (mingw32_arm_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -137,7 +137,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
}
for (mingw32_arm64_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = crt_args.items,
@@ -164,7 +164,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (mingw32_winpthreads_src) |dep| {
try c_source_files.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "mingw", dep,
}),
.extra_flags = winpthreads_args.items,
@@ -192,7 +192,7 @@ fn addCcArgs(
"-D__USE_MINGW_ANSI_STDIO=0",
"-isystem",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", "any-windows-any" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "include", "any-windows-any" }),
});
}
@@ -219,7 +219,7 @@ fn addCrtCcArgs(
"-DHAVE_CONFIG_H",
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "include" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "include" }),
});
}
@@ -232,7 +232,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
- const def_file_path = findDef(arena, comp.getTarget(), comp.zig_lib_directory, lib_name) catch |err| switch (err) {
+ const def_file_path = findDef(arena, comp.getTarget(), comp.dirs.zig_lib, lib_name) catch |err| switch (err) {
error.FileNotFound => {
log.debug("no {s}.def file available to make a DLL import {s}.lib", .{ lib_name, lib_name });
// In this case we will end up putting foo.lib onto the linker line and letting the linker
@@ -247,15 +247,15 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
// Use the global cache directory.
var cache: Cache = .{
.gpa = gpa,
- .manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try comp.dirs.global_cache.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
- cache.addPrefix(comp.zig_lib_directory);
- cache.addPrefix(comp.global_cache_directory);
+ cache.addPrefix(comp.dirs.zig_lib);
+ cache.addPrefix(comp.dirs.global_cache);
defer cache.manifest_dir.close();
cache.hash.addBytes(build_options.version);
- cache.hash.addOptionalBytes(comp.zig_lib_directory.path);
+ cache.hash.addOptionalBytes(comp.dirs.zig_lib.path);
cache.hash.add(target.cpu.arch);
var man = cache.obtain();
@@ -276,7 +276,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
try comp.crt_files.ensureUnusedCapacity(gpa, 1);
comp.crt_files.putAssumeCapacityNoClobber(final_lib_basename, .{
.full_object_path = .{
- .root_dir = comp.global_cache_directory,
+ .root_dir = comp.dirs.global_cache,
.sub_path = sub_path,
},
.lock = man.toOwnedLock(),
@@ -286,11 +286,11 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.global_cache_directory.handle.makeOpenPath(o_sub_path, .{});
+ var o_dir = try comp.dirs.global_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
const final_def_basename = try std.fmt.allocPrint(arena, "{s}.def", .{lib_name});
- const def_final_path = try comp.global_cache_directory.join(arena, &[_][]const u8{
+ const def_final_path = try comp.dirs.global_cache.join(arena, &[_][]const u8{
"o", &digest, final_def_basename,
});
@@ -306,7 +306,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
var aro_comp = aro.Compilation.init(gpa, std.fs.cwd());
defer aro_comp.deinit();
- const include_dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "mingw", "def-include" });
+ const include_dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "mingw", "def-include" });
if (comp.verbose_cc) print: {
std.debug.lockStdErr();
@@ -350,7 +350,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
if (!build_options.have_llvm) return error.ZigCompilerNotBuiltWithLLVMExtensions;
const llvm_bindings = @import("../codegen/llvm/bindings.zig");
const def_final_path_z = try arena.dupeZ(u8, def_final_path);
- const lib_final_path_z = try comp.global_cache_directory.joinZ(arena, &.{lib_final_path});
+ const lib_final_path_z = try comp.dirs.global_cache.joinZ(arena, &.{lib_final_path});
if (llvm_bindings.WriteImportLibrary(
def_final_path_z.ptr,
@intFromEnum(target.toCoffMachine()),
@@ -370,7 +370,7 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
defer comp.mutex.unlock();
try comp.crt_files.putNoClobber(gpa, final_lib_basename, .{
.full_object_path = .{
- .root_dir = comp.global_cache_directory,
+ .root_dir = comp.dirs.global_cache,
.sub_path = lib_final_path,
},
.lock = man.toOwnedLock(),
src/libs/musl.zig
@@ -34,7 +34,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
try args.append("-DCRT");
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "musl", "crt", "crt1.c",
}),
.extra_flags = args.items,
@@ -54,7 +54,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
try args.append("-DCRT");
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "musl", "crt", "rcrt1.c",
}),
.extra_flags = args.items,
@@ -75,7 +75,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
try args.append("-DCRT");
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", "musl", "crt", "Scrt1.c",
}),
.extra_flags = args.items,
@@ -165,7 +165,7 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
try addCcArgs(comp, arena, &args, ext == .o3);
const c_source_file = try c_source_files.addOne();
c_source_file.* = .{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", src_file }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libc", src_file }),
.extra_flags = args.items,
.owner = undefined,
};
@@ -220,9 +220,8 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
&.{ arch_define, family_define };
const root_mod = try Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -242,14 +241,10 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
.global = config,
.cc_argv = cc_argv,
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
});
const sub_compilation = try Compilation.create(comp.gpa, arena, .{
- .local_cache_directory = comp.global_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
@@ -266,9 +261,9 @@ pub fn buildCrtFile(comp: *Compilation, in_crt_file: CrtFile, prog_node: std.Pro
.verbose_cimport = comp.verbose_cimport,
.verbose_llvm_cpu_features = comp.verbose_llvm_cpu_features,
.clang_passthrough_mode = comp.clang_passthrough_mode,
- .c_source_files = &[_]Compilation.CSourceFile{
+ .c_source_files = &.{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &.{ "libc", "musl", "libc.S" }),
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{ "libc", "musl", "libc.S" }),
.owner = root_mod,
},
},
@@ -411,25 +406,25 @@ fn addCcArgs(
"-D_XOPEN_SOURCE=700",
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "musl", "arch", arch_name }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "musl", "arch", arch_name }),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "musl", "arch", "generic" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "musl", "arch", "generic" }),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "musl", "src", "include" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "musl", "src", "include" }),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "musl", "src", "internal" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "musl", "src", "internal" }),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "musl", "include" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "musl", "include" }),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", triple }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "include", triple }),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", "generic-musl" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "include", "generic-musl" }),
o_arg,
@@ -444,7 +439,7 @@ fn addCcArgs(
fn start_asm_path(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 {
const target = comp.getTarget();
- return comp.zig_lib_directory.join(arena, &[_][]const u8{
+ return comp.dirs.zig_lib.join(arena, &.{
"libc", "musl", "crt", std.zig.target.muslArchName(target.cpu.arch, target.abi), basename,
});
}
src/libs/wasi_libc.zig
@@ -81,7 +81,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try addLibcBottomHalfIncludes(comp, arena, &args);
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, crt1_reactor_src_file),
}),
.extra_flags = args.items,
@@ -96,7 +96,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try addLibcBottomHalfIncludes(comp, arena, &args);
var files = [_]Compilation.CSourceFile{
.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, crt1_command_src_file),
}),
.extra_flags = args.items,
@@ -114,7 +114,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
try addCCArgs(comp, arena, &args, .{ .want_O3 = true, .no_strict_aliasing = true });
for (emmalloc_src_files) |file_path| {
try libc_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -131,7 +131,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (libc_bottom_half_src_files) |file_path| {
try libc_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -148,7 +148,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (libc_top_half_src_files) |file_path| {
try libc_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -168,7 +168,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
var emu_dl_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
for (emulated_dl_src_files) |file_path| {
try emu_dl_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -186,7 +186,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
var emu_clocks_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
for (emulated_process_clocks_src_files) |file_path| {
try emu_clocks_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -203,7 +203,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
var emu_getpid_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
for (emulated_getpid_src_files) |file_path| {
try emu_getpid_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -220,7 +220,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
var emu_mman_sources = std.ArrayList(Compilation.CSourceFile).init(arena);
for (emulated_mman_src_files) |file_path| {
try emu_mman_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -238,7 +238,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (emulated_signal_bottom_half_src_files) |file_path| {
try emu_signal_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -255,7 +255,7 @@ pub fn buildCrtFile(comp: *Compilation, crt_file: CrtFile, prog_node: std.Progre
for (emulated_signal_top_half_src_files) |file_path| {
try emu_signal_sources.append(.{
- .src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ .src_path = try comp.dirs.zig_lib.join(arena, &.{
"libc", try sanitize(arena, file_path),
}),
.extra_flags = args.items,
@@ -316,10 +316,10 @@ fn addCCArgs(
"/",
"-iwithsysroot",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", triple }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "include", triple }),
"-iwithsysroot",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "include", "generic-musl" }),
+ try comp.dirs.zig_lib.join(arena, &.{ "libc", "include", "generic-musl" }),
"-DBULK_MEMORY_THRESHOLD=32",
});
@@ -336,7 +336,7 @@ fn addLibcBottomHalfIncludes(
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-bottom-half",
@@ -345,7 +345,7 @@ fn addLibcBottomHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-bottom-half",
@@ -355,7 +355,7 @@ fn addLibcBottomHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-bottom-half",
@@ -364,7 +364,7 @@ fn addLibcBottomHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-top-half",
@@ -374,7 +374,7 @@ fn addLibcBottomHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"musl",
"src",
@@ -382,7 +382,7 @@ fn addLibcBottomHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-top-half",
@@ -392,7 +392,7 @@ fn addLibcBottomHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"musl",
"src",
@@ -408,7 +408,7 @@ fn addLibcTopHalfIncludes(
) error{OutOfMemory}!void {
try args.appendSlice(&[_][]const u8{
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-top-half",
@@ -418,7 +418,7 @@ fn addLibcTopHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"musl",
"src",
@@ -426,7 +426,7 @@ fn addLibcTopHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-top-half",
@@ -436,7 +436,7 @@ fn addLibcTopHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"musl",
"src",
@@ -444,7 +444,7 @@ fn addLibcTopHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-top-half",
@@ -454,7 +454,7 @@ fn addLibcTopHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"musl",
"arch",
@@ -462,7 +462,7 @@ fn addLibcTopHalfIncludes(
}),
"-I",
- try comp.zig_lib_directory.join(arena, &[_][]const u8{
+ try comp.dirs.zig_lib.join(arena, &.{
"libc",
"wasi",
"libc-top-half",
src/link/Elf/ZigObject.zig
@@ -1201,7 +1201,7 @@ fn getNavShdrIndex(
return osec;
}
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
- return switch (zcu.navFileScope(nav_index).mod.optimize_mode) {
+ return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
.Debug, .ReleaseSafe => {
if (self.data_index) |symbol_index|
return self.symbol(symbol_index).outputShndx(elf_file).?;
@@ -1271,7 +1271,7 @@ fn updateNavCode(
log.debug("updateNavCode {}({d})", .{ nav.fqn.fmt(ip), nav_index });
- const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
+ const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
src/link/MachO/ZigObject.zig
@@ -954,7 +954,7 @@ fn updateNavCode(
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
+ const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
@@ -1184,7 +1184,7 @@ fn getNavOutputSection(
}
if (is_const) return macho_file.zig_const_sect_index.?;
if (nav_init != .none and Value.fromInterned(nav_init).isUndefDeep(zcu))
- return switch (zcu.navFileScope(nav_index).mod.optimize_mode) {
+ return switch (zcu.navFileScope(nav_index).mod.?.optimize_mode) {
.Debug, .ReleaseSafe => macho_file.zig_data_sect_index.?,
.ReleaseFast, .ReleaseSmall => macho_file.zig_bss_sect_index.?,
};
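Both the ELF and Mach-O ZigObject writers above pick the output section for an undef-initialized global off the owning module's optimize mode, which is why the now-optional mod gets unwrapped here too: safe modes keep the variable in a data section (where the 0xaa undefined fill actually occupies bytes in the file), while ReleaseFast/ReleaseSmall can park it in .bss. A compressed sketch of that decision:

const std = @import("std");

fn sectionForUndefGlobal(mode: std.builtin.OptimizeMode, data_sect: u32, bss_sect: u32) u32 {
    return switch (mode) {
        // Safe builds materialize the 0xaa "undefined" byte pattern, so the
        // bytes must really be present in the object file.
        .Debug, .ReleaseSafe => data_sect,
        // Fast/small builds may leave the memory zero-initialized in .bss.
        .ReleaseFast, .ReleaseSmall => bss_sect,
    };
}

test "undef globals avoid .bss only in safe modes" {
    try std.testing.expectEqual(@as(u32, 1), sectionForUndefGlobal(.Debug, 1, 2));
    try std.testing.expectEqual(@as(u32, 2), sectionForUndefGlobal(.ReleaseSmall, 1, 2));
}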
src/link/C.zig
@@ -206,7 +206,7 @@ pub fn updateFunc(
.dg = .{
.gpa = gpa,
.pt = pt,
- .mod = zcu.navFileScope(func.owner_nav).mod,
+ .mod = zcu.navFileScope(func.owner_nav).mod.?,
.error_msg = null,
.pass = .{ .nav = func.owner_nav },
.is_naked_fn = Type.fromInterned(func.ty).fnCallingConvention(zcu) == .naked,
@@ -337,7 +337,7 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) l
.dg = .{
.gpa = gpa,
.pt = pt,
- .mod = zcu.navFileScope(nav_index).mod,
+ .mod = zcu.navFileScope(nav_index).mod.?,
.error_msg = null,
.pass = .{ .nav = nav_index },
.is_naked_fn = false,
@@ -490,7 +490,7 @@ pub fn flushModule(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node:
for (self.navs.keys(), self.navs.values()) |nav, *av_block| try self.flushAvBlock(
pt,
- zcu.navFileScope(nav).mod,
+ zcu.navFileScope(nav).mod.?,
&f,
av_block,
self.exported_navs.getPtr(nav),
@@ -846,7 +846,7 @@ pub fn updateExports(
const gpa = zcu.gpa;
const mod, const pass: codegen.DeclGen.Pass, const decl_block, const exported_block = switch (exported) {
.nav => |nav| .{
- zcu.navFileScope(nav).mod,
+ zcu.navFileScope(nav).mod.?,
.{ .nav = nav },
self.navs.getPtr(nav).?,
(try self.exported_navs.getOrPut(gpa, nav)).value_ptr,
src/link/Coff.zig
@@ -1392,7 +1392,7 @@ fn updateNavCode(
log.debug("updateNavCode {} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
+ const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const required_alignment = switch (pt.navAlignment(nav_index)) {
.none => target_util.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
src/link/Dwarf.zig
@@ -34,9 +34,7 @@ pub const UpdateError = error{
std.fs.File.PReadError ||
std.fs.File.PWriteError;
-pub const FlushError =
- UpdateError ||
- std.process.GetCwdError;
+pub const FlushError = UpdateError;
pub const RelocError =
std.fs.File.PWriteError;
@@ -967,7 +965,7 @@ const Entry = struct {
const ip = &zcu.intern_pool;
for (dwarf.types.keys(), dwarf.types.values()) |ty, other_entry| {
const ty_unit: Unit.Index = if (Type.fromInterned(ty).typeDeclInst(zcu)) |inst_index|
- dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod) catch unreachable
+ dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod.?) catch unreachable
else
.main;
if (sec.getUnit(ty_unit) == unit and unit.getEntry(other_entry) == entry)
@@ -977,7 +975,7 @@ const Entry = struct {
});
}
for (dwarf.navs.keys(), dwarf.navs.values()) |nav, other_entry| {
- const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFile(ip)).mod) catch unreachable;
+ const nav_unit = dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav).srcInst(ip).resolveFile(ip)).mod.?) catch unreachable;
if (sec.getUnit(nav_unit) == unit and unit.getEntry(other_entry) == entry)
log.err("missing Nav({}({d}))", .{ ip.getNav(nav).fqn.fmt(ip), @intFromEnum(nav) });
}
@@ -1620,7 +1618,7 @@ pub const WipNav = struct {
const new_func_info = zcu.funcInfo(func);
const new_file = zcu.navFileScopeIndex(new_func_info.owner_nav);
- const new_unit = try dwarf.getUnit(zcu.fileByIndex(new_file).mod);
+ const new_unit = try dwarf.getUnit(zcu.fileByIndex(new_file).mod.?);
const dlw = wip_nav.debug_line.writer(dwarf.gpa);
if (dwarf.incremental()) {
@@ -1810,7 +1808,7 @@ pub const WipNav = struct {
fn getNavEntry(wip_nav: *WipNav, nav_index: InternPool.Nav.Index) UpdateError!struct { Unit.Index, Entry.Index } {
const zcu = wip_nav.pt.zcu;
const ip = &zcu.intern_pool;
- const unit = try wip_nav.dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav_index).srcInst(ip).resolveFile(ip)).mod);
+ const unit = try wip_nav.dwarf.getUnit(zcu.fileByIndex(ip.getNav(nav_index).srcInst(ip).resolveFile(ip)).mod.?);
const gop = try wip_nav.dwarf.navs.getOrPut(wip_nav.dwarf.gpa, nav_index);
if (gop.found_existing) return .{ unit, gop.value_ptr.* };
const entry = try wip_nav.dwarf.addCommonEntry(unit);
@@ -1828,7 +1826,7 @@ pub const WipNav = struct {
const ip = &zcu.intern_pool;
const maybe_inst_index = ty.typeDeclInst(zcu);
const unit = if (maybe_inst_index) |inst_index|
- try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod)
+ try wip_nav.dwarf.getUnit(zcu.fileByIndex(inst_index.resolveFile(ip)).mod.?)
else
.main;
const gop = try wip_nav.dwarf.types.getOrPut(wip_nav.dwarf.gpa, ty.toIntern());
@@ -2386,7 +2384,7 @@ fn initWipNavInner(
else => {},
}
- const unit = try dwarf.getUnit(file.mod);
+ const unit = try dwarf.getUnit(file.mod.?);
const nav_gop = try dwarf.navs.getOrPut(dwarf.gpa, nav_index);
errdefer _ = if (!nav_gop.found_existing) dwarf.navs.pop();
if (nav_gop.found_existing) {
@@ -2514,7 +2512,7 @@ fn initWipNavInner(
try wip_nav.infoAddrSym(sym_index, 0);
wip_nav.func_high_pc = @intCast(wip_nav.debug_info.items.len);
try diw.writeInt(u32, 0, dwarf.endian);
- const target = file.mod.resolved_target.result;
+ const target = file.mod.?.resolved_target.result;
try uleb128(diw, switch (nav.status.fully_resolved.alignment) {
.none => target_info.defaultFunctionAlignment(target),
else => |a| a.maxStrict(target_info.minFunctionAlignment(target)),
@@ -2726,7 +2724,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
var wip_nav: WipNav = .{
.dwarf = dwarf,
.pt = pt,
- .unit = try dwarf.getUnit(file.mod),
+ .unit = try dwarf.getUnit(file.mod.?),
.entry = undefined,
.any_children = false,
.func = .none,
@@ -4044,7 +4042,7 @@ pub fn updateContainerType(dwarf: *Dwarf, pt: Zcu.PerThread, type_index: InternP
const inst_info = ty.typeDeclInst(zcu).?.resolveFull(ip).?;
const file = zcu.fileByIndex(inst_info.file);
- const unit = try dwarf.getUnit(file.mod);
+ const unit = try dwarf.getUnit(file.mod.?);
const file_gop = try dwarf.getModInfo(unit).files.getOrPut(dwarf.gpa, inst_info.file);
if (inst_info.inst == .main_struct_inst) {
const type_gop = try dwarf.types.getOrPut(dwarf.gpa, type_index);
@@ -4348,7 +4346,7 @@ pub fn updateLineNumber(dwarf: *Dwarf, zcu: *Zcu, zir_index: InternPool.TrackedI
var line_buf: [4]u8 = undefined;
std.mem.writeInt(u32, &line_buf, decl.src_line + 1, dwarf.endian);
- const unit = dwarf.debug_info.section.getUnit(dwarf.getUnitIfExists(file.mod) orelse return);
+ const unit = dwarf.debug_info.section.getUnit(dwarf.getUnitIfExists(file.mod.?) orelse return);
const entry = unit.getEntry(dwarf.decls.get(zir_index) orelse return);
try dwarf.getFile().?.pwriteAll(&line_buf, dwarf.debug_info.section.off(dwarf) + unit.off + unit.header_len + entry.off + DebugInfo.declEntryLineOff(dwarf));
}
@@ -4418,18 +4416,10 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
try wip_nav.updateLazy(.unneeded);
}
- {
- const cwd = try std.process.getCwdAlloc(dwarf.gpa);
- defer dwarf.gpa.free(cwd);
- for (dwarf.mods.keys(), dwarf.mods.values()) |mod, *mod_info| {
- const root_dir_path = try std.fs.path.resolve(dwarf.gpa, &.{
- cwd,
- mod.root.root_dir.path orelse "",
- mod.root.sub_path,
- });
- defer dwarf.gpa.free(root_dir_path);
- mod_info.root_dir_path = try dwarf.debug_line_str.addString(dwarf, root_dir_path);
- }
+ for (dwarf.mods.keys(), dwarf.mods.values()) |mod, *mod_info| {
+ const root_dir_path = try mod.root.toAbsolute(zcu.comp.dirs, dwarf.gpa);
+ defer dwarf.gpa.free(root_dir_path);
+ mod_info.root_dir_path = try dwarf.debug_line_str.addString(dwarf, root_dir_path);
}
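For context, a minimal sketch of what `Compilation.Path.toAbsolute` is assumed to do here (not the actual implementation; the `sub_path` and cache-directory field names are guesses, while the `.root` tags and `dirs.zig_lib` do appear in other hunks of this commit):

fn toAbsoluteSketch(p: Compilation.Path, dirs: Compilation.Directories, gpa: std.mem.Allocator) ![]u8 {
    // Pick the base directory named by the path's root, then resolve against the
    // compilation cwd so the result is always absolute, replacing the per-call-site
    // getCwdAlloc + std.fs.path.resolve dance removed above.
    const base: []const u8 = switch (p.root) {
        .none => dirs.cwd,
        .local_cache => dirs.local_cache.path orelse dirs.cwd,
        .global_cache => dirs.global_cache.path orelse dirs.cwd,
        .zig_lib => dirs.zig_lib.path orelse dirs.cwd,
    };
    return std.fs.path.resolve(gpa, &.{ dirs.cwd, base, p.sub_path });
}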
var header = std.ArrayList(u8).init(dwarf.gpa);
@@ -4687,7 +4677,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
dwarf.writeInt(
header.addManyAsSliceAssumeCapacity(dir_index_info.bytes),
- mod_info.dirs.getIndex(dwarf.getUnitIfExists(file.mod).?).?,
+ mod_info.dirs.getIndex(dwarf.getUnitIfExists(file.mod.?).?) orelse 0,
);
unit.cross_section_relocs.appendAssumeCapacity(.{
.source_off = @intCast(header.items.len),
@@ -4695,7 +4685,7 @@ pub fn flushModule(dwarf: *Dwarf, pt: Zcu.PerThread) FlushError!void {
.target_unit = StringSection.unit,
.target_entry = (try dwarf.debug_line_str.addString(
dwarf,
- if (file.mod.builtin_file == file) file.source.? else "",
+ if (file.is_builtin) file.source.? else "",
)).toOptional(),
});
header.appendNTimesAssumeCapacity(0, dwarf.sectionOffsetBytes());
src/link/MachO.zig
@@ -867,11 +867,11 @@ pub fn resolveLibSystem(
success: {
if (self.sdk_layout) |sdk_layout| switch (sdk_layout) {
.sdk => {
- const dir = try fs.path.join(arena, &[_][]const u8{ comp.sysroot.?, "usr", "lib" });
+ const dir = try fs.path.join(arena, &.{ comp.sysroot.?, "usr", "lib" });
if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
},
.vendored => {
- const dir = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libc", "darwin" });
+ const dir = try comp.dirs.zig_lib.join(arena, &.{ "libc", "darwin" });
if (try accessLibPath(arena, &test_path, &checked_paths, dir, "System")) break :success;
},
};
@@ -4406,7 +4406,7 @@ fn inferSdkVersion(comp: *Compilation, sdk_layout: SdkLayout) ?std.SemanticVersi
const sdk_dir = switch (sdk_layout) {
.sdk => comp.sysroot.?,
- .vendored => fs.path.join(arena, &.{ comp.zig_lib_directory.path.?, "libc", "darwin" }) catch return null,
+ .vendored => fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "libc", "darwin" }) catch return null,
};
if (readSdkVersionFromSettings(arena, sdk_dir)) |ver| {
return parseSdkVersion(ver);
src/link/Plan9.zig
@@ -315,8 +315,9 @@ pub fn createEmpty(
}
fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void {
- const gpa = self.base.comp.gpa;
- const zcu = self.base.comp.zcu.?;
+ const comp = self.base.comp;
+ const gpa = comp.gpa;
+ const zcu = comp.zcu.?;
const file_scope = zcu.navFileScopeIndex(nav_index);
const fn_map_res = try self.fn_nav_table.getOrPut(gpa, file_scope);
if (fn_map_res.found_existing) {
@@ -345,14 +346,11 @@ fn putFn(self: *Plan9, nav_index: InternPool.Nav.Index, out: FnNavOutput) !void
try a.writer().writeInt(u16, 1, .big);
// getting the full file path
- // TODO don't call getcwd here, that is inappropriate
- var buf: [std.fs.max_path_bytes]u8 = undefined;
- const full_path = try std.fs.path.join(arena, &.{
- file.mod.root.root_dir.path orelse try std.posix.getcwd(&buf),
- file.mod.root.sub_path,
- file.sub_file_path,
- });
- try self.addPathComponents(full_path, &a);
+ {
+ const full_path = try file.path.toAbsolute(comp.dirs, gpa);
+ defer gpa.free(full_path);
+ try self.addPathComponents(full_path, &a);
+ }
// null terminate
try a.append(0);
@@ -437,9 +435,7 @@ pub fn updateFunc(
.start_line = dbg_info_output.start_line.?,
.end_line = dbg_info_output.end_line,
};
- // The awkward error handling here is due to putFn calling `std.posix.getcwd` which it should not do.
- self.putFn(func.owner_nav, out) catch |err|
- return zcu.codegenFail(func.owner_nav, "failed to put fn: {s}", .{@errorName(err)});
+ try self.putFn(func.owner_nav, out);
return self.updateFinish(pt, func.owner_nav);
}
src/Package/Module.zig
@@ -1,15 +1,17 @@
//! Corresponds to something that Zig source code can `@import`.
-/// Only files inside this directory can be imported.
-root: Cache.Path,
-/// Relative to `root`. May contain path separators.
+/// The root directory of the module. Only files inside this directory can be imported.
+root: Compilation.Path,
+/// Path to the root source file of this module. Relative to `root`. May contain path separators.
root_src_path: []const u8,
/// Name used in compile errors. Looks like "root.foo.bar".
fully_qualified_name: []const u8,
-/// The dependency table of this module. Shared dependencies such as 'std',
-/// 'builtin', and 'root' are not specified in every dependency table, but
-/// instead only in the table of `main_mod`. `Module.importFile` is
-/// responsible for detecting these names and using the correct package.
+/// The dependency table of this module. The shared dependencies 'std' and
+/// 'root' are not specified in every module dependency table, but are stored
+/// separately in `Zcu`. 'builtin' is also not stored here, although it is
+/// not necessarily the same between all modules. Handling of `@import` in
+/// the rest of the compiler must detect these special names and use the
+/// correct module instead of consulting `deps`.
deps: Deps = .{},
resolved_target: ResolvedTarget,
@@ -33,25 +35,14 @@ cc_argv: []const []const u8,
structured_cfg: bool,
no_builtin: bool,
-/// If the module is an `@import("builtin")` module, this is the `File` that
-/// is preallocated for it. Otherwise this field is null.
-builtin_file: ?*File,
-
pub const Deps = std.StringArrayHashMapUnmanaged(*Module);
-pub fn isBuiltin(m: Module) bool {
- return m.builtin_file != null;
-}
-
pub const Tree = struct {
/// Each `Package` exposes a `Module` with build.zig as its root source file.
build_module_table: std.AutoArrayHashMapUnmanaged(MultiHashHexDigest, *Module),
};
pub const CreateOptions = struct {
- /// Where to store builtin.zig. The global cache directory is used because
- /// it is a pure function based on CLI flags.
- global_cache_directory: Cache.Directory,
paths: Paths,
fully_qualified_name: []const u8,
@@ -61,15 +52,8 @@ pub const CreateOptions = struct {
/// If this is null then `resolved_target` must be non-null.
parent: ?*Package.Module,
- builtin_mod: ?*Package.Module,
-
- /// Allocated into the given `arena`. Should be shared across all module creations in a Compilation.
- /// Ignored if `builtin_mod` is passed or if `!have_zcu`.
- /// Otherwise, may be `null` only if this Compilation consists of a single module.
- builtin_modules: ?*std.StringHashMapUnmanaged(*Module),
-
pub const Paths = struct {
- root: Cache.Path,
+ root: Compilation.Path,
/// Relative to `root`. May contain path separators.
root_src_path: []const u8,
};
@@ -401,126 +385,13 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.cc_argv = options.cc_argv,
.structured_cfg = structured_cfg,
.no_builtin = no_builtin,
- .builtin_file = null,
};
-
- const opt_builtin_mod = options.builtin_mod orelse b: {
- if (!options.global.have_zcu) break :b null;
-
- const generated_builtin_source = try Builtin.generate(.{
- .target = target,
- .zig_backend = zig_backend,
- .output_mode = options.global.output_mode,
- .link_mode = options.global.link_mode,
- .unwind_tables = unwind_tables,
- .is_test = options.global.is_test,
- .single_threaded = single_threaded,
- .link_libc = options.global.link_libc,
- .link_libcpp = options.global.link_libcpp,
- .optimize_mode = optimize_mode,
- .error_tracing = error_tracing,
- .valgrind = valgrind,
- .sanitize_thread = sanitize_thread,
- .fuzz = fuzz,
- .pic = pic,
- .pie = options.global.pie,
- .strip = strip,
- .code_model = code_model,
- .omit_frame_pointer = omit_frame_pointer,
- .wasi_exec_model = options.global.wasi_exec_model,
- }, arena);
-
- const new = if (options.builtin_modules) |builtins| new: {
- const gop = try builtins.getOrPut(arena, generated_builtin_source);
- if (gop.found_existing) break :b gop.value_ptr.*;
- errdefer builtins.removeByPtr(gop.key_ptr);
- const new = try arena.create(Module);
- gop.value_ptr.* = new;
- break :new new;
- } else try arena.create(Module);
- errdefer if (options.builtin_modules) |builtins| assert(builtins.remove(generated_builtin_source));
-
- const new_file = try arena.create(File);
-
- const hex_digest = digest: {
- var hasher: Cache.Hasher = Cache.hasher_init;
- hasher.update(generated_builtin_source);
-
- var bin_digest: Cache.BinDigest = undefined;
- hasher.final(&bin_digest);
-
- var hex_digest: Cache.HexDigest = undefined;
- _ = std.fmt.bufPrint(
- &hex_digest,
- "{s}",
- .{std.fmt.fmtSliceHexLower(&bin_digest)},
- ) catch unreachable;
-
- break :digest hex_digest;
- };
-
- const builtin_sub_path = try arena.dupe(u8, "b" ++ std.fs.path.sep_str ++ hex_digest);
-
- new.* = .{
- .root = .{
- .root_dir = options.global_cache_directory,
- .sub_path = builtin_sub_path,
- },
- .root_src_path = "builtin.zig",
- .fully_qualified_name = if (options.parent == null)
- "builtin"
- else
- try std.fmt.allocPrint(arena, "{s}.builtin", .{options.fully_qualified_name}),
- .resolved_target = .{
- .result = target,
- .is_native_os = resolved_target.is_native_os,
- .is_native_abi = resolved_target.is_native_abi,
- .llvm_cpu_features = llvm_cpu_features,
- },
- .optimize_mode = optimize_mode,
- .single_threaded = single_threaded,
- .error_tracing = error_tracing,
- .valgrind = valgrind,
- .pic = pic,
- .strip = strip,
- .omit_frame_pointer = omit_frame_pointer,
- .stack_check = stack_check,
- .stack_protector = stack_protector,
- .code_model = code_model,
- .red_zone = red_zone,
- .sanitize_c = sanitize_c,
- .sanitize_thread = sanitize_thread,
- .fuzz = fuzz,
- .unwind_tables = unwind_tables,
- .cc_argv = &.{},
- .structured_cfg = structured_cfg,
- .no_builtin = no_builtin,
- .builtin_file = new_file,
- };
- new_file.* = .{
- .sub_file_path = "builtin.zig",
- .stat = undefined,
- .source = generated_builtin_source,
- .tree = null,
- .zir = null,
- .zoir = null,
- .status = .never_loaded,
- .mod = new,
- };
- break :b new;
- };
-
- if (opt_builtin_mod) |builtin_mod| {
- try mod.deps.ensureUnusedCapacity(arena, 1);
- mod.deps.putAssumeCapacityNoClobber("builtin", builtin_mod);
- }
-
return mod;
}
/// All fields correspond to `CreateOptions`.
pub const LimitedOptions = struct {
- root: Cache.Path,
+ root: Compilation.Path,
root_src_path: []const u8,
fully_qualified_name: []const u8,
};
@@ -553,18 +424,73 @@ pub fn createLimited(gpa: Allocator, options: LimitedOptions) Allocator.Error!*P
.cc_argv = undefined,
.structured_cfg = undefined,
.no_builtin = undefined,
- .builtin_file = null,
};
return mod;
}
-/// Asserts that the module has a builtin module, which is not true for non-zig
-/// modules such as ones only used for `@embedFile`, or the root module when
-/// there is no Zig Compilation Unit.
-pub fn getBuiltinDependency(m: Module) *Module {
- const result = m.deps.values()[0];
- assert(result.isBuiltin());
- return result;
+/// Does not ensure that the module's root directory exists on-disk; see `Builtin.updateFileOnDisk` for that task.
+pub fn createBuiltin(arena: Allocator, opts: Builtin, dirs: Compilation.Directories) Allocator.Error!*Module {
+ const sub_path = "b" ++ Cache.binToHex(opts.hash());
+ const new = try arena.create(Module);
+ new.* = .{
+ .root = try .fromRoot(arena, dirs, .global_cache, sub_path),
+ .root_src_path = "builtin.zig",
+ .fully_qualified_name = "builtin",
+ .resolved_target = .{
+ .result = opts.target,
+ // These values are not in `opts`, but do not matter because `builtin.zig` contains no runtime code.
+ .is_native_os = false,
+ .is_native_abi = false,
+ .llvm_cpu_features = null,
+ },
+ .optimize_mode = opts.optimize_mode,
+ .single_threaded = opts.single_threaded,
+ .error_tracing = opts.error_tracing,
+ .valgrind = opts.valgrind,
+ .pic = opts.pic,
+ .strip = opts.strip,
+ .omit_frame_pointer = opts.omit_frame_pointer,
+ .code_model = opts.code_model,
+ .sanitize_thread = opts.sanitize_thread,
+ .fuzz = opts.fuzz,
+ .unwind_tables = opts.unwind_tables,
+ .cc_argv = &.{},
+ // These values are not in `opts`, but do not matter because `builtin.zig` contains no runtime code.
+ .stack_check = false,
+ .stack_protector = 0,
+ .red_zone = false,
+ .sanitize_c = .off,
+ .structured_cfg = false,
+ .no_builtin = false,
+ };
+ return new;
+}
+
+/// Returns the `Builtin` which forms the contents of `@import("builtin")` for this module.
+pub fn getBuiltinOptions(m: Module, global: Compilation.Config) Builtin {
+ assert(global.have_zcu);
+ return .{
+ .target = m.resolved_target.result,
+ .zig_backend = target_util.zigBackend(m.resolved_target.result, global.use_llvm),
+ .output_mode = global.output_mode,
+ .link_mode = global.link_mode,
+ .unwind_tables = m.unwind_tables,
+ .is_test = global.is_test,
+ .single_threaded = m.single_threaded,
+ .link_libc = global.link_libc,
+ .link_libcpp = global.link_libcpp,
+ .optimize_mode = m.optimize_mode,
+ .error_tracing = m.error_tracing,
+ .valgrind = m.valgrind,
+ .sanitize_thread = m.sanitize_thread,
+ .fuzz = m.fuzz,
+ .pic = m.pic,
+ .pie = global.pie,
+ .strip = m.strip,
+ .code_model = m.code_model,
+ .omit_frame_pointer = m.omit_frame_pointer,
+ .wasi_exec_model = global.wasi_exec_model,
+ };
}
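A rough usage sketch of how these two functions compose (the call-site names `comp`, `zcu`, and `importer_mod` are assumed; the lookup-by-hash mirrors `updateBuiltinModule` later in this commit):

const opts = importer_mod.getBuiltinOptions(comp.config);
const builtin_mod: *Module = zcu.builtin_modules.get(opts.hash()) orelse
    try Module.createBuiltin(comp.arena, opts, comp.dirs);
// A real caller would also register a newly created module in `zcu.builtin_modules`
// and create its root `File`; see `updateBuiltinModule` in Zcu/PerThread.zig below.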
const Module = @This();
src/Zcu/PerThread.zig
@@ -8,23 +8,26 @@ const Ast = std.zig.Ast;
const AstGen = std.zig.AstGen;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
+const Builtin = @import("../Builtin.zig");
const build_options = @import("build_options");
const builtin = @import("builtin");
const Cache = std.Build.Cache;
const dev = @import("../dev.zig");
const InternPool = @import("../InternPool.zig");
const AnalUnit = InternPool.AnalUnit;
-const isUpDir = @import("../introspect.zig").isUpDir;
+const introspect = @import("../introspect.zig");
const Liveness = @import("../Liveness.zig");
const log = std.log.scoped(.zcu);
const Module = @import("../Package.zig").Module;
const Sema = @import("../Sema.zig");
const std = @import("std");
+const mem = std.mem;
const target_util = @import("../target.zig");
const trace = @import("../tracy.zig").trace;
const Type = @import("../Type.zig");
const Value = @import("../Value.zig");
const Zcu = @import("../Zcu.zig");
+const Compilation = @import("../Compilation.zig");
const Zir = std.zig.Zir;
const Zoir = std.zig.Zoir;
const ZonGen = std.zig.ZonGen;
@@ -50,16 +53,9 @@ fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
const file = zcu.fileByIndex(file_index);
- const is_builtin = file.mod.isBuiltin();
- log.debug("deinit File {s}", .{file.sub_file_path});
- if (is_builtin) {
- file.unloadTree(gpa);
- file.unloadZir(gpa);
- } else {
- gpa.free(file.sub_file_path);
- file.unload(gpa);
- }
- file.references.deinit(gpa);
+ log.debug("deinit File {}", .{file.path.fmt(zcu.comp)});
+ file.path.deinit(gpa);
+ file.unload(gpa);
if (file.prev_zir) |prev_zir| {
prev_zir.deinit(gpa);
gpa.destroy(prev_zir);
@@ -70,20 +66,19 @@ fn deinitFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
pub fn destroyFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) void {
const gpa = pt.zcu.gpa;
const file = pt.zcu.fileByIndex(file_index);
- const is_builtin = file.mod.isBuiltin();
pt.deinitFile(file_index);
- if (!is_builtin) gpa.destroy(file);
+ gpa.destroy(file);
}
/// Ensures that `file` has up-to-date ZIR. If not, loads the ZIR cache or runs
-/// AstGen as needed. Also updates `file.status`.
+/// AstGen as needed. Also updates `file.status`. Does not assume that `file.mod`
+/// is populated. Does not return `error.AnalysisFail` on AstGen failures.
pub fn updateFile(
pt: Zcu.PerThread,
+ file_index: Zcu.File.Index,
file: *Zcu.File,
- path_digest: Cache.BinDigest,
) !void {
dev.check(.ast_gen);
- assert(!file.mod.isBuiltin());
const tracy = trace(@src());
defer tracy.end();
@@ -93,13 +88,20 @@ pub fn updateFile(
const gpa = zcu.gpa;
// In any case we need to examine the stat of the file to determine the course of action.
- var source_file = try file.mod.root.openFile(file.sub_file_path, .{});
+ var source_file = f: {
+ const dir, const sub_path = file.path.openInfo(comp.dirs);
+ break :f try dir.openFile(sub_path, .{});
+ };
defer source_file.close();
const stat = try source_file.stat();
- const want_local_cache = file.mod == zcu.main_mod;
- const hex_digest = Cache.binToHex(path_digest);
+ const want_local_cache = switch (file.path.root) {
+ .none, .local_cache => true,
+ .global_cache, .zig_lib => false,
+ };
+
+ const hex_digest = Cache.binToHex(file.path.digest());
const cache_directory = if (want_local_cache) zcu.local_zir_cache else zcu.global_zir_cache;
const zir_dir = cache_directory.handle;
@@ -107,8 +109,8 @@ pub fn updateFile(
var lock: std.fs.File.Lock = switch (file.status) {
.never_loaded, .retryable_failure => lock: {
// First, load the cached ZIR code, if any.
- log.debug("AstGen checking cache: {s} (local={}, digest={s})", .{
- file.sub_file_path, want_local_cache, &hex_digest,
+ log.debug("AstGen checking cache: {} (local={}, digest={s})", .{
+ file.path.fmt(comp), want_local_cache, &hex_digest,
});
break :lock .shared;
@@ -120,18 +122,18 @@ pub fn updateFile(
stat.inode == file.stat.inode;
if (unchanged_metadata) {
- log.debug("unmodified metadata of file: {s}", .{file.sub_file_path});
+ log.debug("unmodified metadata of file: {}", .{file.path.fmt(comp)});
return;
}
- log.debug("metadata changed: {s}", .{file.sub_file_path});
+ log.debug("metadata changed: {}", .{file.path.fmt(comp)});
break :lock .exclusive;
},
};
// The old compile error, if any, is no longer relevant.
- pt.lockAndClearFileCompileError(file);
+ pt.lockAndClearFileCompileError(file_index, file);
// If `zir` is not null, and `prev_zir` is null, then `TrackedInst`s are associated with `zir`.
// We need to keep it around!
@@ -211,12 +213,12 @@ pub fn updateFile(
};
switch (result) {
.success => {
- log.debug("AstGen cached success: {s}", .{file.sub_file_path});
+ log.debug("AstGen cached success: {}", .{file.path.fmt(comp)});
break false;
},
.invalid => {},
- .truncated => log.warn("unexpected EOF reading cached ZIR for {s}", .{file.sub_file_path}),
- .stale => log.debug("AstGen cache stale: {s}", .{file.sub_file_path}),
+ .truncated => log.warn("unexpected EOF reading cached ZIR for {}", .{file.path.fmt(comp)}),
+ .stale => log.debug("AstGen cache stale: {}", .{file.path.fmt(comp)}),
}
// If we already have the exclusive lock then it is our job to update.
@@ -255,22 +257,22 @@ pub fn updateFile(
file.zir = try AstGen.generate(gpa, file.tree.?);
Zcu.saveZirCache(gpa, cache_file, stat, file.zir.?) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
- else => log.warn("unable to write cached ZIR code for {}{s} to {}{s}: {s}", .{
- file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err),
+ else => log.warn("unable to write cached ZIR code for {} to {}{s}: {s}", .{
+ file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
}),
};
},
.zon => {
file.zoir = try ZonGen.generate(gpa, file.tree.?, .{});
Zcu.saveZoirCache(cache_file, stat, file.zoir.?) catch |err| {
- log.warn("unable to write cached ZOIR code for {}{s} to {}{s}: {s}", .{
- file.mod.root, file.sub_file_path, cache_directory, &hex_digest, @errorName(err),
+ log.warn("unable to write cached ZOIR code for {} to {}{s}: {s}", .{
+ file.path.fmt(comp), cache_directory, &hex_digest, @errorName(err),
});
};
},
}
- log.debug("AstGen fresh success: {s}", .{file.sub_file_path});
+ log.debug("AstGen fresh success: {}", .{file.path.fmt(comp)});
}
file.stat = .{
@@ -287,7 +289,7 @@ pub fn updateFile(
if (file.zir.?.hasCompileErrors()) {
comp.mutex.lock();
defer comp.mutex.unlock();
- try zcu.failed_files.putNoClobber(gpa, file, null);
+ try zcu.failed_files.putNoClobber(gpa, file_index, null);
}
if (file.zir.?.loweringFailed()) {
file.status = .astgen_failure;
@@ -300,7 +302,7 @@ pub fn updateFile(
file.status = .astgen_failure;
comp.mutex.lock();
defer comp.mutex.unlock();
- try zcu.failed_files.putNoClobber(gpa, file, null);
+ try zcu.failed_files.putNoClobber(gpa, file_index, null);
} else {
file.status = .success;
}
@@ -310,8 +312,7 @@ pub fn updateFile(
switch (file.status) {
.never_loaded => unreachable,
.retryable_failure => unreachable,
- .astgen_failure => return error.AnalysisFail,
- .success => return,
+ .astgen_failure, .success => {},
}
}
@@ -388,9 +389,18 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
var updated_files: std.AutoArrayHashMapUnmanaged(Zcu.File.Index, UpdatedFile) = .empty;
defer cleanupUpdatedFiles(gpa, &updated_files);
- for (zcu.import_table.values()) |file_index| {
+ for (zcu.import_table.keys()) |file_index| {
+ if (!zcu.alive_files.contains(file_index)) continue;
const file = zcu.fileByIndex(file_index);
assert(file.status == .success);
+ if (file.module_changed) {
+ try updated_files.putNoClobber(gpa, file_index, .{
+ .file = file,
+ // We intentionally don't map any instructions here; that's the point, the whole file is outdated!
+ .inst_map = .{},
+ });
+ continue;
+ }
switch (file.getMode()) {
.zig => {}, // logic below
.zon => {
@@ -540,10 +550,12 @@ pub fn updateZirRefs(pt: Zcu.PerThread) Allocator.Error!void {
for (updated_files.keys(), updated_files.values()) |file_index, updated_file| {
const file = updated_file.file;
- const prev_zir = file.prev_zir.?;
- file.prev_zir = null;
- prev_zir.deinit(gpa);
- gpa.destroy(prev_zir);
+ if (file.prev_zir) |prev_zir| {
+ prev_zir.deinit(gpa);
+ gpa.destroy(prev_zir);
+ file.prev_zir = null;
+ }
+ file.module_changed = false;
// For every file which has changed, re-scan the namespace of the file's root struct type.
// These types are special-cased because they don't have an enclosing declaration which will
@@ -661,9 +673,9 @@ fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage)
// * The type `std`, and its namespace
// * The type `std.builtin`, and its namespace
// * A semi-reasonable source location
- const std_file_imported = pt.importPkg(zcu.std_mod) catch return error.AnalysisFail;
- try pt.ensureFileAnalyzed(std_file_imported.file_index);
- const std_type: Type = .fromInterned(zcu.fileRootType(std_file_imported.file_index));
+ const std_file_index = zcu.module_roots.get(zcu.std_mod).?.unwrap().?;
+ try pt.ensureFileAnalyzed(std_file_index);
+ const std_type: Type = .fromInterned(zcu.fileRootType(std_file_index));
const std_namespace = std_type.getNamespaceIndex(zcu);
try pt.ensureNamespaceUpToDate(std_namespace);
const builtin_str = try ip.getOrPutString(gpa, pt.tid, "builtin", .no_embedded_nulls);
@@ -675,7 +687,7 @@ fn analyzeMemoizedState(pt: Zcu.PerThread, stage: InternPool.MemoizedStateStage)
try pt.ensureNamespaceUpToDate(builtin_namespace);
const src: Zcu.LazySrcLoc = .{
.base_node_inst = builtin_type.typeDeclInst(zcu).?,
- .offset = .entire_file,
+ .offset = .{ .byte_abs = 0 },
};
var analysis_arena: std.heap.ArenaAllocator = .init(gpa);
@@ -1250,7 +1262,7 @@ fn analyzeNavVal(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Zcu.CompileErr
if (!try nav_ty.hasRuntimeBitsSema(pt)) {
if (zcu.comp.config.use_llvm) break :queue_codegen;
- if (file.mod.strip) break :queue_codegen;
+ if (file.mod.?.strip) break :queue_codegen;
}
// This job depends on any resolve_type_fully jobs queued up before it.
@@ -1730,13 +1742,12 @@ pub fn linkerUpdateFunc(pt: Zcu.PerThread, func_index: InternPool.Index, air: Ai
}
}
-/// https://github.com/ziglang/zig/issues/14307
-pub fn semaPkg(pt: Zcu.PerThread, pkg: *Module) !void {
+pub fn semaMod(pt: Zcu.PerThread, mod: *Module) !void {
dev.check(.sema);
- const import_file_result = try pt.importPkg(pkg);
- const root_type = pt.zcu.fileRootType(import_file_result.file_index);
+ const file_index = pt.zcu.module_roots.get(mod).?.unwrap().?;
+ const root_type = pt.zcu.fileRootType(file_index);
if (root_type == .none) {
- return pt.semaFile(import_file_result.file_index);
+ return pt.semaFile(file_index);
}
}
@@ -1808,7 +1819,7 @@ fn createFileRootStruct(
try zcu.comp.queueJob(.{ .resolve_type_fully = wip_ty.index });
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
- if (file.mod.strip) break :codegen_type;
+ if (file.mod.?.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
@@ -1829,7 +1840,7 @@ fn updateFileNamespace(pt: Zcu.PerThread, file_index: Zcu.File.Index) Allocator.
if (file_root_type == .none) return;
log.debug("updateFileNamespace mod={s} sub_file_path={s}", .{
- file.mod.fully_qualified_name,
+ file.mod.?.fully_qualified_name,
file.sub_file_path,
});
@@ -1872,211 +1883,464 @@ fn semaFile(pt: Zcu.PerThread, file_index: Zcu.File.Index) Zcu.SemaError!void {
errdefer zcu.intern_pool.remove(pt.tid, struct_ty);
}
-pub fn importPkg(pt: Zcu.PerThread, mod: *Module) Allocator.Error!Zcu.ImportFileResult {
+/// Called by AstGen worker threads when an import is seen. If `new_file` is returned, the caller is
+/// then responsible for queueing a new AstGen job for the new file.
+/// Assumes that `comp.mutex` is NOT locked. It will be locked by this function where necessary.
+pub fn discoverImport(
+ pt: Zcu.PerThread,
+ importer_path: Compilation.Path,
+ import_string: []const u8,
+) Allocator.Error!union(enum) {
+ module,
+ existing_file: Zcu.File.Index,
+ new_file: struct {
+ index: Zcu.File.Index,
+ file: *Zcu.File,
+ },
+} {
const zcu = pt.zcu;
const gpa = zcu.gpa;
- // The resolved path is used as the key in the import table, to detect if
- // an import refers to the same as another, despite different relative paths
- // or differently mapped package names.
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- mod.root.root_dir.path orelse ".",
- mod.root.sub_path,
- mod.root_src_path,
- });
- var keep_resolved_path = false;
- defer if (!keep_resolved_path) gpa.free(resolved_path);
+ if (!mem.endsWith(u8, import_string, ".zig") and !mem.endsWith(u8, import_string, ".zon")) {
+ return .module;
+ }
- const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
+ const new_path = try importer_path.upJoin(gpa, zcu.comp.dirs, import_string);
+ errdefer new_path.deinit(gpa);
+
+ // We're about to do a GOP on `import_table`, so we need the mutex.
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
+
+ const gop = try zcu.import_table.getOrPutAdapted(gpa, new_path, Zcu.ImportTableAdapter{ .zcu = zcu });
errdefer _ = zcu.import_table.pop();
if (gop.found_existing) {
- const file_index = gop.value_ptr.*;
- const file = zcu.fileByIndex(file_index);
- try file.addReference(zcu, .{ .root = mod });
- return .{
- .file = file,
- .file_index = file_index,
- .is_new = false,
- .is_pkg = true,
- };
- }
-
- const ip = &zcu.intern_pool;
- if (mod.builtin_file) |builtin_file| {
- const path_digest = Zcu.computePathDigest(zcu, mod, builtin_file.sub_file_path);
- const file_index = try ip.createFile(gpa, pt.tid, .{
- .bin_digest = path_digest,
- .file = builtin_file,
- .root_type = .none,
- });
- keep_resolved_path = true; // It's now owned by import_table.
- gop.value_ptr.* = file_index;
- try builtin_file.addReference(zcu, .{ .root = mod });
- return .{
- .file = builtin_file,
- .file_index = file_index,
- .is_new = false,
- .is_pkg = true,
- };
+ new_path.deinit(gpa); // we didn't need it for `File.path`
+ return .{ .existing_file = gop.key_ptr.* };
}
- const sub_file_path = try gpa.dupe(u8, mod.root_src_path);
- errdefer gpa.free(sub_file_path);
-
- const comp = zcu.comp;
- if (comp.file_system_inputs) |fsi|
- try comp.appendFileSystemInput(fsi, mod.root, sub_file_path);
+ zcu.import_table.lockPointers();
+ defer zcu.import_table.unlockPointers();
const new_file = try gpa.create(Zcu.File);
errdefer gpa.destroy(new_file);
- const path_digest = zcu.computePathDigest(mod, sub_file_path);
- const new_file_index = try ip.createFile(gpa, pt.tid, .{
- .bin_digest = path_digest,
+ const new_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ .bin_digest = new_path.digest(),
.file = new_file,
.root_type = .none,
});
- keep_resolved_path = true; // It's now owned by import_table.
- gop.value_ptr.* = new_file_index;
+ errdefer comptime unreachable; // because we don't remove the file from the internpool
+
+ gop.key_ptr.* = new_file_index;
new_file.* = .{
- .sub_file_path = sub_file_path,
+ .status = .never_loaded,
+ .path = new_path,
.stat = undefined,
+ .is_builtin = false,
.source = null,
.tree = null,
.zir = null,
.zoir = null,
- .status = .never_loaded,
- .mod = mod,
+ .mod = null,
+ .sub_file_path = undefined,
+ .module_changed = false,
+ .prev_zir = null,
+ .zoir_invalidated = false,
};
- try new_file.addReference(zcu, .{ .root = mod });
- return .{
+ return .{ .new_file = .{
+ .index = new_file_index,
.file = new_file,
- .file_index = new_file_index,
- .is_new = true,
- .is_pkg = true,
- };
+ } };
}
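Sketch of the expected caller-side handling (the `queueAstGenWork` helper is hypothetical; the doc comment above only requires that the caller queue an AstGen job for a `new_file` result):

switch (try pt.discoverImport(file.path, import_string)) {
    .module => {}, // resolves to a module name, not a file path; nothing to schedule
    .existing_file => {}, // already in `import_table`; another worker owns (or owned) it
    .new_file => |new| try queueAstGenWork(comp, new.index, new.file), // hypothetical helper
}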
-/// Called from a worker thread during AstGen (with the Compilation mutex held).
-/// Also called from Sema during semantic analysis.
-/// Does not attempt to load the file from disk; just returns a corresponding `*Zcu.File`.
-pub fn importFile(
+pub fn doImport(
pt: Zcu.PerThread,
- cur_file: *Zcu.File,
+ /// This file must have its `mod` populated.
+ importer: *Zcu.File,
import_string: []const u8,
) error{
OutOfMemory,
ModuleNotFound,
- ImportOutsideModulePath,
- CurrentWorkingDirectoryUnlinked,
-}!Zcu.ImportFileResult {
+ IllegalZigImport,
+}!struct {
+ file: Zcu.File.Index,
+ module_root: ?*Module,
+} {
const zcu = pt.zcu;
- const mod = cur_file.mod;
-
- if (std.mem.eql(u8, import_string, "std")) {
- return pt.importPkg(zcu.std_mod);
- }
- if (std.mem.eql(u8, import_string, "root")) {
- return pt.importPkg(zcu.root_mod);
- }
- if (mod.deps.get(import_string)) |pkg| {
- return pt.importPkg(pkg);
+ const gpa = zcu.gpa;
+ const imported_mod: ?*Module = m: {
+ if (mem.eql(u8, import_string, "std")) break :m zcu.std_mod;
+ if (mem.eql(u8, import_string, "root")) break :m zcu.root_mod;
+ if (mem.eql(u8, import_string, "builtin")) {
+ const opts = importer.mod.?.getBuiltinOptions(zcu.comp.config);
+ break :m zcu.builtin_modules.get(opts.hash()).?;
+ }
+ break :m importer.mod.?.deps.get(import_string);
+ };
+ if (imported_mod) |mod| {
+ if (zcu.module_roots.get(mod).?.unwrap()) |file_index| {
+ return .{
+ .file = file_index,
+ .module_root = mod,
+ };
+ }
}
if (!std.mem.endsWith(u8, import_string, ".zig") and
!std.mem.endsWith(u8, import_string, ".zon"))
{
return error.ModuleNotFound;
}
+ const path = try importer.path.upJoin(gpa, zcu.comp.dirs, import_string);
+ defer path.deinit(gpa);
+ if (try path.isIllegalZigImport(gpa, zcu.comp.dirs)) {
+ return error.IllegalZigImport;
+ }
+ return .{
+ .file = zcu.import_table.getKeyAdapted(path, Zcu.ImportTableAdapter{ .zcu = zcu }).?,
+ .module_root = null,
+ };
+}
+/// This is called once during `Compilation.create` and never again. "builtin" modules don't yet
+/// exist, so are not added to `module_roots` here. They must be added when they are created.
+pub fn populateModuleRootTable(pt: Zcu.PerThread) error{
+ OutOfMemory,
+ /// One of the specified modules had its root source file at an illegal path.
+ IllegalZigImport,
+}!void {
+ const zcu = pt.zcu;
const gpa = zcu.gpa;
- // The resolved path is used as the key in the import table, to detect if
- // an import refers to the same as another, despite different relative paths
- // or differently mapped package names.
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- mod.root.root_dir.path orelse ".",
- mod.root.sub_path,
- cur_file.sub_file_path,
- "..",
- import_string,
- });
+    // We'll initially add [mod, undefined] pairs; when we reach each pair while
+    // iterating, we rewrite its undefined value.
+ const roots = &zcu.module_roots;
+ roots.clearRetainingCapacity();
+
+ // Start with:
+ // * `std_mod`, which is the main root of analysis
+ // * `root_mod`, which is `@import("root")`
+ // * `main_mod`, which is a special analysis root in tests (and otherwise equal to `root_mod`)
+ // All other modules will be found by traversing their dependency tables.
+ try roots.ensureTotalCapacity(gpa, 3);
+ roots.putAssumeCapacity(zcu.std_mod, undefined);
+ roots.putAssumeCapacity(zcu.root_mod, undefined);
+ roots.putAssumeCapacity(zcu.main_mod, undefined);
+ var i: usize = 0;
+ while (i < roots.count()) {
+ const mod = roots.keys()[i];
+ try roots.ensureUnusedCapacity(gpa, mod.deps.count());
+ for (mod.deps.values()) |dep| {
+ const gop = roots.getOrPutAssumeCapacity(dep);
+ _ = gop; // we want to leave the value undefined if it was added
+ }
- var keep_resolved_path = false;
- defer if (!keep_resolved_path) gpa.free(resolved_path);
+ const root_file_out = &roots.values()[i];
+ roots.lockPointers();
+ defer roots.unlockPointers();
- const gop = try zcu.import_table.getOrPut(gpa, resolved_path);
- errdefer _ = zcu.import_table.pop();
- if (gop.found_existing) {
- const file_index = gop.value_ptr.*;
- return .{
- .file = zcu.fileByIndex(file_index),
- .file_index = file_index,
- .is_new = false,
- .is_pkg = false,
+ i += 1;
+
+ if (Zcu.File.modeFromPath(mod.root_src_path) == null) {
+ root_file_out.* = .none;
+ continue;
+ }
+
+ const path = try mod.root.join(gpa, zcu.comp.dirs, mod.root_src_path);
+ errdefer path.deinit(gpa);
+
+ if (try path.isIllegalZigImport(gpa, zcu.comp.dirs)) {
+ return error.IllegalZigImport;
+ }
+
+ const gop = try zcu.import_table.getOrPutAdapted(gpa, path, Zcu.ImportTableAdapter{ .zcu = zcu });
+ errdefer _ = zcu.import_table.pop();
+
+ if (gop.found_existing) {
+ path.deinit(gpa);
+ root_file_out.* = gop.key_ptr.*.toOptional();
+ continue;
+ }
+
+ zcu.import_table.lockPointers();
+ defer zcu.import_table.unlockPointers();
+
+ const new_file = try gpa.create(Zcu.File);
+ errdefer gpa.destroy(new_file);
+
+ const new_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ .bin_digest = path.digest(),
+ .file = new_file,
+ .root_type = .none,
+ });
+ errdefer comptime unreachable; // because we don't remove the file from the internpool
+
+ gop.key_ptr.* = new_file_index;
+ root_file_out.* = new_file_index.toOptional();
+ new_file.* = .{
+ .status = .never_loaded,
+ .path = path,
+ .stat = undefined,
+ .is_builtin = false,
+ .source = null,
+ .tree = null,
+ .zir = null,
+ .zoir = null,
+ .mod = null,
+ .sub_file_path = undefined,
+ .module_changed = false,
+ .prev_zir = null,
+ .zoir_invalidated = false,
};
}
+}
- const ip = &zcu.intern_pool;
+/// Clears and re-populates `pt.zcu.alive_files`, and determines the module identity of every alive
+/// file. If a file's module changes, its `module_changed` flag is set for `updateZirRefs` to see.
+/// Also clears and re-populates `failed_imports` and `multi_module_err` based on the set of alive
+/// files.
+///
+/// Live files are also added as file system inputs if necessary.
+///
+/// Returns whether any live file is in a failed state. However, this function does *not*
+/// modify `pt.zcu.skip_analysis_this_update`.
+///
+/// If an error is returned, `pt.zcu.alive_files` might contain undefined values.
+pub fn computeAliveFiles(pt: Zcu.PerThread) Allocator.Error!bool {
+ const zcu = pt.zcu;
+ const comp = zcu.comp;
+ const gpa = zcu.gpa;
- const new_file = try gpa.create(Zcu.File);
- errdefer gpa.destroy(new_file);
+ var any_fatal_files = false;
+ zcu.multi_module_err = null;
+ zcu.failed_imports.clearRetainingCapacity();
+ zcu.alive_files.clearRetainingCapacity();
+
+ // This function will iterate the keys of `alive_files`, adding new entries as it discovers
+ // imports. Once a file is in `alive_files`, it has its `mod` field up-to-date. If conflicting
+ // imports are discovered for a file, we will set `multi_module_err`. Crucially, this traversal
+ // is single-threaded, and depends only on the order of the imports map from AstGen, which makes
+    // its behavior (in terms of which multi-module errors are discovered) entirely consistent in a
+ // multi-threaded environment (where things like file indices could differ between compiler runs).
+
+ // The roots of our file liveness analysis will be the analysis roots.
+ try zcu.alive_files.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
+ for (zcu.analysis_roots.slice()) |mod| {
+ const file_index = zcu.module_roots.get(mod).?.unwrap() orelse continue;
+ const file = zcu.fileByIndex(file_index);
- const resolved_root_path = try std.fs.path.resolve(gpa, &.{
- mod.root.root_dir.path orelse ".",
- mod.root.sub_path,
- });
- defer gpa.free(resolved_root_path);
+ file.mod = mod;
+ file.sub_file_path = mod.root_src_path;
- const sub_file_path = p: {
- const relative = std.fs.path.relative(gpa, resolved_root_path, resolved_path) catch |err| switch (err) {
- error.Unexpected => unreachable,
- else => |e| return e,
- };
- errdefer gpa.free(relative);
+ zcu.alive_files.putAssumeCapacityNoClobber(file_index, .{ .analysis_root = mod });
+ }
+
+ var live_check_idx: usize = 0;
+ while (live_check_idx < zcu.alive_files.count()) {
+ const file_idx = zcu.alive_files.keys()[live_check_idx];
+ const file = zcu.fileByIndex(file_idx);
+ live_check_idx += 1;
- if (!isUpDir(relative) and !std.fs.path.isAbsolute(relative)) {
- break :p relative;
+ switch (file.status) {
+ .never_loaded => unreachable, // everything reachable is loaded by the AstGen workers
+ .retryable_failure, .astgen_failure => any_fatal_files = true,
+ .success => {},
}
- return error.ImportOutsideModulePath;
- };
- errdefer gpa.free(sub_file_path);
- log.debug("new importFile. resolved_root_path={s}, resolved_path={s}, sub_file_path={s}, import_string={s}", .{
- resolved_root_path, resolved_path, sub_file_path, import_string,
- });
+ try comp.appendFileSystemInput(file.path);
+
+ switch (file.getMode()) {
+ .zig => {}, // continue to logic below
+ .zon => continue, // ZON can't import anything
+ }
+
+ if (file.status != .success) continue; // ZIR not valid if there was a file failure
+
+ const zir = file.zir.?;
+ const imports_index = zir.extra[@intFromEnum(Zir.ExtraIndex.imports)];
+ if (imports_index == 0) continue; // this Zig file has no imports
+ const extra = zir.extraData(Zir.Inst.Imports, imports_index);
+ var extra_index = extra.end;
+ try zcu.alive_files.ensureUnusedCapacity(gpa, extra.data.imports_len);
+ for (0..extra.data.imports_len) |_| {
+ const item = zir.extraData(Zir.Inst.Imports.Item, extra_index);
+ extra_index = item.end;
+ const import_path = zir.nullTerminatedString(item.data.name);
+
+ if (std.mem.eql(u8, import_path, "builtin")) {
+ // We've not necessarily generated builtin modules yet, so `doImport` could fail. Instead,
+ // create the module here. Then, since we know that `builtin.zig` doesn't have an error and
+ // has no imports other than 'std', we can just continue onto the next import.
+ try pt.updateBuiltinModule(file.mod.?.getBuiltinOptions(comp.config));
+ continue;
+ }
+
+ const res = pt.doImport(file, import_path) catch |err| switch (err) {
+ error.OutOfMemory => |e| return e,
+ error.ModuleNotFound => {
+ // It'd be nice if this were a file-level error, but allowing this turns out to
+ // be quite important in practice, e.g. for optional dependencies whose import
+ // is behind a comptime condition. So, the error here happens in `Sema` instead.
+ continue;
+ },
+ error.IllegalZigImport => {
+ try zcu.failed_imports.append(gpa, .{
+ .file_index = file_idx,
+ .import_string = item.data.name,
+ .import_token = item.data.token,
+ .kind = .illegal_zig_import,
+ });
+ continue;
+ },
+ };
+
+ // If the import was not of a module, we propagate our own module.
+ const imported_mod = res.module_root orelse file.mod.?;
+ const imported_file = zcu.fileByIndex(res.file);
+
+ const imported_ref: Zcu.File.Reference = .{ .import = .{
+ .importer = file_idx,
+ .tok = item.data.token,
+ .module = res.module_root,
+ } };
+
+ const gop = zcu.alive_files.getOrPutAssumeCapacity(res.file);
+ if (gop.found_existing) {
+ // This means `imported_file.mod` is already populated. If it doesn't match
+ // `imported_mod`, then this file exists in multiple modules.
+ if (imported_file.mod.? != imported_mod) {
+ // We only report the first multi-module error we see. Thanks to this traversal
+ // being deterministic, this doesn't raise consistency issues. Moreover, it's a
+ // useful behavior; we know that this error can be reached *without* realising
+ // that any other files are multi-module, so it's probably approximately where
+ // the problem "begins". Any compilation with a multi-module file is likely to
+ // have a huge number of them by transitive imports, so just reporting this one
+ // hopefully keeps the error focused.
+ zcu.multi_module_err = .{
+ .file = file_idx,
+ .modules = .{ imported_file.mod.?, imported_mod },
+ .refs = .{ gop.value_ptr.*, imported_ref },
+ };
+ // If we discover a multi-module error, it's the only error which matters, and we
+ // can't discern any useful information about the file's own imports; so just do
+ // an early exit now we've populated `zcu.multi_module_err`.
+ return any_fatal_files;
+ }
+ continue;
+ }
+ // We're the first thing we've found referencing `res.file`.
+ gop.value_ptr.* = imported_ref;
+ if (imported_file.mod) |m| {
+ if (m == imported_mod) {
+ // Great, the module and sub path are already populated correctly.
+ continue;
+ }
+ }
+ // We need to set the file's module, meaning we also need to compute its sub path.
+ // This string is externally managed and has a lifetime at least equal to the
+ // lifetime of `imported_file`. `null` means the file is outside its module root.
+ switch (imported_file.path.isNested(imported_mod.root)) {
+ .yes => |sub_path| {
+ if (imported_file.mod != null) {
+ // There was a module from a previous update; instruct `updateZirRefs` to
+ // invalidate everything.
+ imported_file.module_changed = true;
+ }
+ imported_file.mod = imported_mod;
+ imported_file.sub_file_path = sub_path;
+ },
+ .different_roots, .no => {
+ try zcu.failed_imports.append(gpa, .{
+ .file_index = file_idx,
+ .import_string = item.data.name,
+ .import_token = item.data.token,
+ .kind = .file_outside_module_root,
+ });
+ _ = zcu.alive_files.pop(); // we failed to populate `mod`/`sub_file_path`
+ },
+ }
+ }
+ }
+ return any_fatal_files;
+}
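A hypothetical caller, sketched only from the doc comment above (the real call site is outside this diff):

const any_fatal = try pt.computeAliveFiles();
if (any_fatal or zcu.multi_module_err != null) {
    // `computeAliveFiles` never touches this flag itself; the caller decides.
    zcu.skip_analysis_this_update = true;
}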
+
+/// Ensures that the `@import("builtin")` module corresponding to `opts` is available in
+/// `builtin_modules`, and that its file is populated. Also ensures the file on disk is
+/// up-to-date, setting a misc failure if updating it fails.
+/// Asserts that the imported `builtin.zig` has no ZIR errors, and that it has only one
+/// import, which is 'std'.
+pub fn updateBuiltinModule(pt: Zcu.PerThread, opts: Builtin) Allocator.Error!void {
+ const zcu = pt.zcu;
const comp = zcu.comp;
- if (comp.file_system_inputs) |fsi|
- try comp.appendFileSystemInput(fsi, mod.root, sub_file_path);
+ const gpa = zcu.gpa;
- const path_digest = zcu.computePathDigest(mod, sub_file_path);
- const new_file_index = try ip.createFile(gpa, pt.tid, .{
- .bin_digest = path_digest,
- .file = new_file,
- .root_type = .none,
- });
- keep_resolved_path = true; // It's now owned by import_table.
- gop.value_ptr.* = new_file_index;
- new_file.* = .{
- .sub_file_path = sub_file_path,
+ const gop = try zcu.builtin_modules.getOrPut(gpa, opts.hash());
+ if (gop.found_existing) return; // the `File` is up-to-date
+ errdefer _ = zcu.builtin_modules.pop();
+
+ const mod: *Module = try .createBuiltin(comp.arena, opts, comp.dirs);
+ assert(std.mem.eql(u8, &mod.getBuiltinOptions(comp.config).hash(), gop.key_ptr)); // builtin is its own builtin
+
+ const path = try mod.root.join(gpa, comp.dirs, "builtin.zig");
+ errdefer path.deinit(gpa);
+ const file_gop = try zcu.import_table.getOrPutAdapted(gpa, path, Zcu.ImportTableAdapter{ .zcu = zcu });
+ // `Compilation.Path.isIllegalZigImport` checks guard file creation, so
+ // there isn't an `import_table` entry for this path yet.
+ assert(!file_gop.found_existing);
+ errdefer _ = zcu.import_table.pop();
+
+ try zcu.module_roots.ensureUnusedCapacity(gpa, 1);
+
+ const file = try gpa.create(Zcu.File);
+ errdefer gpa.destroy(file);
+
+ file.* = .{
.status = .never_loaded,
.stat = undefined,
-
+ .path = path,
+ .is_builtin = true,
.source = null,
.tree = null,
.zir = null,
.zoir = null,
-
.mod = mod,
+ .sub_file_path = "builtin.zig",
+ .module_changed = false,
+ .prev_zir = null,
+ .zoir_invalidated = false,
};
- return .{
- .file = new_file,
- .file_index = new_file_index,
- .is_new = true,
- .is_pkg = false,
- };
+ const file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ .bin_digest = path.digest(),
+ .file = file,
+ .root_type = .none,
+ });
+
+ gop.value_ptr.* = mod;
+ file_gop.key_ptr.* = file_index;
+ zcu.module_roots.putAssumeCapacityNoClobber(mod, file_index.toOptional());
+ try opts.populateFile(gpa, file);
+
+ assert(file.status == .success);
+ assert(!file.zir.?.hasCompileErrors());
+ {
+ // Check that it has only one import, which is 'std'.
+ const imports_idx = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
+ assert(imports_idx != 0); // there is an import
+ const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_idx);
+ assert(extra.data.imports_len == 1); // there is exactly one import
+ const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra.end);
+ const import_path = file.zir.?.nullTerminatedString(item.data.name);
+ assert(mem.eql(u8, import_path, "std")); // the single import is of 'std'
+ }
+
+ Builtin.updateFileOnDisk(file, comp) catch |err| comp.setMiscFailure(
+ .write_builtin_zig,
+ "unable to write '{}': {s}",
+ .{ file.path.fmt(comp), @errorName(err) },
+ );
}
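The hash keying gives a useful dedup property (sketch; `mod_a` and `mod_b` are hypothetical modules that happen to resolve to identical builtin options):

const opts_a = mod_a.getBuiltinOptions(comp.config);
const opts_b = mod_b.getBuiltinOptions(comp.config);
assert(std.mem.eql(u8, &opts_a.hash(), &opts_b.hash())); // identical options, identical hash
try pt.updateBuiltinModule(opts_a);
try pt.updateBuiltinModule(opts_b); // found_existing: returns immediately
// Both modules now share the single `zcu.builtin_modules.get(opts_a.hash()).?` entry.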
pub fn embedFile(
@@ -2091,63 +2355,49 @@ pub fn embedFile(
const zcu = pt.zcu;
const gpa = zcu.gpa;
- if (cur_file.mod.deps.get(import_string)) |mod| {
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- mod.root.root_dir.path orelse ".",
- mod.root.sub_path,
- mod.root_src_path,
- });
- errdefer gpa.free(resolved_path);
-
- const gop = try zcu.embed_table.getOrPut(gpa, resolved_path);
- errdefer assert(std.mem.eql(u8, zcu.embed_table.pop().?.key, resolved_path));
+ const opt_mod: ?*Module = m: {
+ if (mem.eql(u8, import_string, "std")) break :m zcu.std_mod;
+ if (mem.eql(u8, import_string, "root")) break :m zcu.root_mod;
+ if (mem.eql(u8, import_string, "builtin")) {
+ const opts = cur_file.mod.?.getBuiltinOptions(zcu.comp.config);
+ break :m zcu.builtin_modules.get(opts.hash()).?;
+ }
+ break :m cur_file.mod.?.deps.get(import_string);
+ };
+ if (opt_mod) |mod| {
+ const path = try mod.root.join(gpa, zcu.comp.dirs, mod.root_src_path);
+ errdefer path.deinit(gpa);
+ const gop = try zcu.embed_table.getOrPutAdapted(gpa, path, Zcu.EmbedTableAdapter{});
if (gop.found_existing) {
- gpa.free(resolved_path); // we're not using this key
+ path.deinit(gpa); // we're not using this key
return @enumFromInt(gop.index);
}
-
- gop.value_ptr.* = try pt.newEmbedFile(mod, mod.root_src_path, resolved_path);
+ errdefer _ = zcu.embed_table.pop();
+ gop.key_ptr.* = try pt.newEmbedFile(path);
return @enumFromInt(gop.index);
}
- // The resolved path is used as the key in the table, to detect if a file
- // refers to the same as another, despite different relative paths.
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- cur_file.mod.root.root_dir.path orelse ".",
- cur_file.mod.root.sub_path,
- cur_file.sub_file_path,
- "..",
- import_string,
- });
- errdefer gpa.free(resolved_path);
-
- const gop = try zcu.embed_table.getOrPut(gpa, resolved_path);
- errdefer assert(std.mem.eql(u8, zcu.embed_table.pop().?.key, resolved_path));
-
- if (gop.found_existing) {
- gpa.free(resolved_path); // we're not using this key
- return @enumFromInt(gop.index);
- }
-
- const resolved_root_path = try std.fs.path.resolve(gpa, &.{
- cur_file.mod.root.root_dir.path orelse ".",
- cur_file.mod.root.sub_path,
- });
- defer gpa.free(resolved_root_path);
-
- const sub_file_path = std.fs.path.relative(gpa, resolved_root_path, resolved_path) catch |err| switch (err) {
- error.Unexpected => unreachable,
- else => |e| return e,
+ const embed_file: *Zcu.EmbedFile, const embed_file_idx: Zcu.EmbedFile.Index = ef: {
+ const path = try cur_file.path.upJoin(gpa, zcu.comp.dirs, import_string);
+ errdefer path.deinit(gpa);
+ const gop = try zcu.embed_table.getOrPutAdapted(gpa, path, Zcu.EmbedTableAdapter{});
+ if (gop.found_existing) {
+ path.deinit(gpa); // we're not using this key
+ break :ef .{ gop.key_ptr.*, @enumFromInt(gop.index) };
+ } else {
+ errdefer _ = zcu.embed_table.pop();
+ gop.key_ptr.* = try pt.newEmbedFile(path);
+ break :ef .{ gop.key_ptr.*, @enumFromInt(gop.index) };
+ }
};
- defer gpa.free(sub_file_path);
- if (isUpDir(sub_file_path) or std.fs.path.isAbsolute(sub_file_path)) {
- return error.ImportOutsideModulePath;
+ switch (embed_file.path.isNested(cur_file.mod.?.root)) {
+ .yes => {},
+ .different_roots, .no => return error.ImportOutsideModulePath,
}
- gop.value_ptr.* = try pt.newEmbedFile(cur_file.mod, sub_file_path, resolved_path);
- return @enumFromInt(gop.index);
+ return embed_file_idx;
}
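A user-level illustration of the `isNested` rule enforced above (file layout hypothetical):

// From "src/main.zig" in a module rooted at "src/":
const ok = @embedFile("data/blob.bin"); // resolves under the module root
const bad = @embedFile("../outside.bin"); // resolves outside it; rejected via error.ImportOutsideModulePath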
pub fn updateEmbedFile(
@@ -2177,7 +2427,10 @@ fn updateEmbedFileInner(
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
- var file = try ef.owner.root.openFile(ef.sub_file_path.toSlice(ip), .{});
+ var file = f: {
+ const dir, const sub_path = ef.path.openInfo(zcu.comp.dirs);
+ break :f try dir.openFile(sub_path, .{});
+ };
defer file.close();
const stat: Cache.File.Stat = .fromFs(try file.stat());
@@ -2232,28 +2485,21 @@ fn updateEmbedFileInner(
ef.stat = stat;
}
+/// Assumes that `path` is allocated into `gpa`. Takes ownership of `path` on success.
fn newEmbedFile(
pt: Zcu.PerThread,
- mod: *Module,
- /// The path of the file to embed relative to the root of `mod`.
- sub_file_path: []const u8,
- /// The resolved path of the file to embed.
- resolved_path: []const u8,
+ path: Compilation.Path,
) !*Zcu.EmbedFile {
const zcu = pt.zcu;
const comp = zcu.comp;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
- if (comp.file_system_inputs) |fsi|
- try comp.appendFileSystemInput(fsi, mod.root, sub_file_path);
-
const new_file = try gpa.create(Zcu.EmbedFile);
errdefer gpa.destroy(new_file);
new_file.* = .{
- .owner = mod,
- .sub_file_path = try ip.getOrPutString(gpa, pt.tid, sub_file_path, .no_embedded_nulls),
+ .path = path,
.val = .none,
.err = null,
.stat = undefined,
@@ -2262,6 +2508,8 @@ fn newEmbedFile(
var opt_ip_str: ?InternPool.String = null;
try pt.updateEmbedFile(new_file, &opt_ip_str);
+ try comp.appendFileSystemInput(path);
+
// Add the file contents to the `whole` cache manifest if necessary.
cache: {
const whole = switch (zcu.comp.cache_use) {
@@ -2269,17 +2517,18 @@ fn newEmbedFile(
.incremental => break :cache,
};
const man = whole.cache_manifest orelse break :cache;
- const ip_str = opt_ip_str orelse break :cache;
-
- const copied_resolved_path = try gpa.dupe(u8, resolved_path);
- errdefer gpa.free(copied_resolved_path);
+ const ip_str = opt_ip_str orelse break :cache; // this will be a compile error
const array_len = Value.fromInterned(new_file.val).typeOf(zcu).childType(zcu).arrayLen(zcu);
+ const contents = ip_str.toSlice(array_len, ip);
+
+ const path_str = try path.toAbsolute(comp.dirs, gpa);
+ defer gpa.free(path_str);
whole.cache_manifest_mutex.lock();
defer whole.cache_manifest_mutex.unlock();
- man.addFilePostContents(copied_resolved_path, ip_str.toSlice(array_len, ip), new_file.stat) catch |err| switch (err) {
+ man.addFilePostContents(path_str, contents, new_file.stat) catch |err| switch (err) {
error.Unexpected => unreachable,
else => |e| return e,
};
@@ -2805,7 +3054,7 @@ pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Err
/// Removes any entry from `Zcu.failed_files` associated with `file`. Acquires `Compilation.mutex` as needed.
/// `file.zir` must be unchanged from the last update, as it is used to determine if there is such an entry.
-fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
+fn lockAndClearFileCompileError(pt: Zcu.PerThread, file_index: Zcu.File.Index, file: *Zcu.File) void {
const maybe_has_error = switch (file.status) {
.never_loaded => false,
.retryable_failure => true,
@@ -2829,9 +3078,9 @@ fn lockAndClearFileCompileError(pt: Zcu.PerThread, file: *Zcu.File) void {
pt.zcu.comp.mutex.lock();
defer pt.zcu.comp.mutex.unlock();
- if (pt.zcu.failed_files.fetchSwapRemove(file)) |kv| {
+ if (pt.zcu.failed_files.fetchSwapRemove(file_index)) |kv| {
assert(maybe_has_error); // the runtime safety case above
- if (kv.value) |msg| msg.destroy(pt.zcu.gpa); // delete previous error message
+ if (kv.value) |msg| pt.zcu.gpa.free(msg); // delete previous error message
}
}
@@ -3009,8 +3258,8 @@ pub fn populateTestFunctions(
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
- const builtin_mod = zcu.root_mod.getBuiltinDependency();
- const builtin_file_index = (pt.importPkg(builtin_mod) catch unreachable).file_index;
+ const builtin_mod = zcu.builtin_modules.get(zcu.root_mod.getBuiltinOptions(zcu.comp.config).hash()).?;
+ const builtin_file_index = zcu.module_roots.get(builtin_mod).?.unwrap().?;
pt.ensureFileAnalyzed(builtin_file_index) catch |err| switch (err) {
error.AnalysisFail => unreachable, // builtin module is generated so cannot be corrupt
error.OutOfMemory => |e| return e,
@@ -3213,54 +3462,8 @@ pub fn linkerUpdateLineNumber(pt: Zcu.PerThread, ti: InternPool.TrackedInst.Inde
}
}
-/// Sets `File.status` of `file_index` to `retryable_failure`, and stores an error in `pt.zcu.failed_files`.
-pub fn reportRetryableAstGenError(
- pt: Zcu.PerThread,
- src: Zcu.AstGenSrc,
- file_index: Zcu.File.Index,
- err: anyerror,
-) error{OutOfMemory}!void {
- const zcu = pt.zcu;
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
-
- const file = zcu.fileByIndex(file_index);
- file.status = .retryable_failure;
-
- const src_loc: Zcu.LazySrcLoc = switch (src) {
- .root => .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = file_index,
- .inst = .main_struct_inst,
- }),
- .offset = .entire_file,
- },
- .import => |info| .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = info.importing_file,
- .inst = .main_struct_inst,
- }),
- .offset = .{ .token_abs = info.import_tok },
- },
- };
-
- const err_msg = try Zcu.ErrorMsg.create(gpa, src_loc, "unable to load '{}/{s}': {s}", .{
- file.mod.root, file.sub_file_path, @errorName(err),
- });
- errdefer err_msg.destroy(gpa);
-
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
- const gop = try zcu.failed_files.getOrPut(gpa, file);
- if (gop.found_existing) {
- if (gop.value_ptr.*) |old_err_msg| {
- old_err_msg.destroy(gpa);
- }
- }
- gop.value_ptr.* = err_msg;
-}
-
-/// Sets `File.status` of `file_index` to `retryable_failure`, and stores an error in `pt.zcu.failed_files`.
+/// Stores an error in `pt.zcu.failed_files` for this file, and sets the file
+/// status to `retryable_failure`.
pub fn reportRetryableFileError(
pt: Zcu.PerThread,
file_index: Zcu.File.Index,
@@ -3269,35 +3472,27 @@ pub fn reportRetryableFileError(
) error{OutOfMemory}!void {
const zcu = pt.zcu;
const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
const file = zcu.fileByIndex(file_index);
+
file.status = .retryable_failure;
- const err_msg = try Zcu.ErrorMsg.create(
- gpa,
- .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = file_index,
- .inst = .main_struct_inst,
- }),
- .offset = .entire_file,
- },
- format,
- args,
- );
- errdefer err_msg.destroy(gpa);
+ const msg = try std.fmt.allocPrint(gpa, format, args);
+ errdefer gpa.free(msg);
- zcu.comp.mutex.lock();
- defer zcu.comp.mutex.unlock();
+ const old_msg: ?[]u8 = old_msg: {
+ zcu.comp.mutex.lock();
+ defer zcu.comp.mutex.unlock();
- const gop = try zcu.failed_files.getOrPut(gpa, file);
- if (gop.found_existing) {
- if (gop.value_ptr.*) |old_err_msg| {
- old_err_msg.destroy(gpa);
- }
- }
- gop.value_ptr.* = err_msg;
+ const gop = try zcu.failed_files.getOrPut(gpa, file_index);
+ const old: ?[]u8 = if (gop.found_existing) old: {
+ break :old gop.value_ptr.*;
+ } else null;
+ gop.value_ptr.* = msg;
+
+ break :old_msg old;
+ };
+ if (old_msg) |m| gpa.free(m);
}
/// Shortcut for calling `intern_pool.get`.
@@ -3850,7 +4045,7 @@ fn recreateStructType(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
- if (file.mod.strip) break :codegen_type;
+ if (file.mod.?.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
@@ -3946,7 +4141,7 @@ fn recreateUnionType(
codegen_type: {
if (zcu.comp.config.use_llvm) break :codegen_type;
- if (file.mod.strip) break :codegen_type;
+ if (file.mod.?.strip) break :codegen_type;
// This job depends on any resolve_type_fully jobs queued up before it.
try zcu.comp.queueJob(.{ .codegen_type = wip_ty.index });
}
src/Builtin.zig
@@ -19,6 +19,26 @@ code_model: std.builtin.CodeModel,
omit_frame_pointer: bool,
wasi_exec_model: std.builtin.WasiExecModel,
+/// Compute an abstract hash representing this `Builtin`. This is *not* a hash
+/// of the resulting file contents.
+pub fn hash(opts: @This()) [std.Build.Cache.bin_digest_len]u8 {
+ var h: Cache.Hasher = Cache.hasher_init;
+ inline for (@typeInfo(@This()).@"struct".fields) |f| {
+ if (comptime std.mem.eql(u8, f.name, "target")) {
+ // This needs special handling.
+ std.hash.autoHash(&h, opts.target.cpu);
+ std.hash.autoHash(&h, opts.target.os.tag);
+ std.hash.autoHash(&h, opts.target.os.versionRange());
+ std.hash.autoHash(&h, opts.target.abi);
+ std.hash.autoHash(&h, opts.target.ofmt);
+ std.hash.autoHash(&h, opts.target.dynamic_linker);
+ } else {
+ std.hash.autoHash(&h, @field(opts, f.name));
+ }
+ }
+ return h.finalResult();
+}
+
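// Editorial sketch, not part of the diff. The digest returned by `hash` keys the table of
// generated 'builtin' modules, mirroring the lookup used in `populateTestFunctions` above.
// `zcu: *Zcu` is assumed to be available at the call site.
const opts = zcu.root_mod.getBuiltinOptions(zcu.comp.config);
const builtin_mod = zcu.builtin_modules.get(opts.hash()).?;
_ = builtin_mod;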
pub fn generate(opts: @This(), allocator: Allocator) Allocator.Error![:0]u8 {
var buffer = std.ArrayList(u8).init(allocator);
try append(opts, &buffer);
@@ -263,50 +283,66 @@ pub fn append(opts: @This(), buffer: *std.ArrayList(u8)) Allocator.Error!void {
}
}
-pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void {
- if (mod.root.statFile(mod.root_src_path)) |stat| {
+/// This essentially takes the place of `Zcu.PerThread.updateFile`, but for 'builtin' modules.
+/// Instead of reading the file from disk, its contents are generated in-memory.
+pub fn populateFile(opts: @This(), gpa: Allocator, file: *File) Allocator.Error!void {
+ assert(file.is_builtin);
+ assert(file.status == .never_loaded);
+ assert(file.source == null);
+ assert(file.tree == null);
+ assert(file.zir == null);
+
+ file.source = try opts.generate(gpa);
+
+ log.debug("parsing and generating 'builtin.zig'", .{});
+
+ file.tree = try std.zig.Ast.parse(gpa, file.source.?, .zig);
+ assert(file.tree.?.errors.len == 0); // builtin.zig must parse
+
+ file.zir = try AstGen.generate(gpa, file.tree.?);
+ assert(!file.zir.?.hasCompileErrors()); // builtin.zig must not have astgen errors
+ file.status = .success;
+}
+
+/// After `populateFile` succeeds, call this function to write the generated file out to disk
+/// if necessary. This is useful for external tooling such as debuggers.
+/// Assumes that `file.mod` is correctly set to the builtin module.
+pub fn updateFileOnDisk(file: *File, comp: *Compilation) !void {
+ assert(file.is_builtin);
+ assert(file.status == .success);
+ assert(file.source != null);
+
+ const root_dir, const sub_path = file.path.openInfo(comp.dirs);
+
+ if (root_dir.statFile(sub_path)) |stat| {
if (stat.size != file.source.?.len) {
std.log.warn(
- "the cached file '{}{s}' had the wrong size. Expected {d}, found {d}. " ++
+ "the cached file '{}' had the wrong size. Expected {d}, found {d}. " ++
"Overwriting with correct file contents now",
- .{ mod.root, mod.root_src_path, file.source.?.len, stat.size },
+ .{ file.path.fmt(comp), file.source.?.len, stat.size },
);
-
- try writeFile(file, mod);
} else {
file.stat = .{
.size = stat.size,
.inode = stat.inode,
.mtime = stat.mtime,
};
+ return;
}
} else |err| switch (err) {
- error.BadPathName => unreachable, // it's always "builtin.zig"
- error.NameTooLong => unreachable, // it's always "builtin.zig"
- error.PipeBusy => unreachable, // it's not a pipe
- error.NoDevice => unreachable, // it's not a pipe
- error.WouldBlock => unreachable, // not asking for non-blocking I/O
+ error.FileNotFound => {},
- error.FileNotFound => try writeFile(file, mod),
+ error.WouldBlock => unreachable, // not asking for non-blocking I/O
+ error.BadPathName => unreachable, // it's always "o/digest/builtin.zig"
+ error.NameTooLong => unreachable, // it's always "o/digest/builtin.zig"
+ // We don't expect the file to be a pipe, but can't mark `error.PipeBusy` as `unreachable`,
+ // because the user could always replace the file on disk.
else => |e| return e,
}
- log.debug("parsing and generating '{s}'", .{mod.root_src_path});
-
- file.tree = try std.zig.Ast.parse(comp.gpa, file.source.?, .zig);
- assert(file.tree.?.errors.len == 0); // builtin.zig must parse
-
- file.zir = try AstGen.generate(comp.gpa, file.tree.?);
- assert(!file.zir.?.hasCompileErrors()); // builtin.zig must not have astgen errors
- file.status = .success;
- // Note that whilst we set `zir` here, we populated `path_digest`
- // all the way back in `Package.Module.create`.
-}
-
-fn writeFile(file: *File, mod: *Module) !void {
- var buf: [std.fs.max_path_bytes]u8 = undefined;
- var af = try mod.root.atomicFile(mod.root_src_path, .{ .make_path = true }, &buf);
+ // `make_path` matters because the dir hasn't actually been created yet.
+ var af = try root_dir.atomicFile(sub_path, .{ .make_path = true });
defer af.deinit();
try af.file.writeAll(file.source.?);
af.finish() catch |err| switch (err) {
@@ -331,6 +367,7 @@ fn writeFile(file: *File, mod: *Module) !void {
const builtin = @import("builtin");
const std = @import("std");
const Allocator = std.mem.Allocator;
+const Cache = std.Build.Cache;
const build_options = @import("build_options");
const Module = @import("Package/Module.zig");
const assert = std.debug.assert;
src/codegen.zig
@@ -56,7 +56,7 @@ pub fn generateFunction(
) CodeGenError!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
- const target = zcu.navFileScope(func.owner_nav).mod.resolved_target.result;
+ const target = zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_aarch64,
@@ -81,7 +81,7 @@ pub fn generateLazyFunction(
) CodeGenError!void {
const zcu = pt.zcu;
const target = if (Type.fromInterned(lazy_sym.ty).typeDeclInstAllowGeneratedTag(zcu)) |inst_index|
- zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.resolved_target.result
+ zcu.fileByIndex(inst_index.resolveFile(&zcu.intern_pool)).mod.?.resolved_target.result
else
zcu.getTarget();
switch (target_util.zigBackend(target, false)) {
@@ -722,7 +722,7 @@ fn lowerNavRef(
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
- const target = zcu.navFileScope(nav_index).mod.resolved_target.result;
+ const target = zcu.navFileScope(nav_index).mod.?.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const is_obj = lf.comp.config.output_mode == .Obj;
const nav_ty = Type.fromInterned(ip.getNav(nav_index).typeOf(ip));
@@ -884,7 +884,7 @@ fn genNavRef(
else
.{ false, .none, nav.isThreadlocal(ip) };
- const single_threaded = zcu.navFileScope(nav_index).mod.single_threaded;
+ const single_threaded = zcu.navFileScope(nav_index).mod.?.single_threaded;
const name = nav.name;
if (lf.cast(.elf)) |elf_file| {
const zo = elf_file.zigObjectPtr().?;
src/Compilation.zig
@@ -2,6 +2,7 @@ const Compilation = @This();
const std = @import("std");
const builtin = @import("builtin");
+const fs = std.fs;
const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
@@ -10,12 +11,13 @@ const Target = std.Target;
const ThreadPool = std.Thread.Pool;
const WaitGroup = std.Thread.WaitGroup;
const ErrorBundle = std.zig.ErrorBundle;
-const Path = Cache.Path;
+const fatal = std.process.fatal;
const Value = @import("Value.zig");
const Type = @import("Type.zig");
const target_util = @import("target.zig");
const Package = @import("Package.zig");
+const introspect = @import("introspect.zig");
const link = @import("link.zig");
const tracy = @import("tracy.zig");
const trace = tracy.trace;
@@ -28,7 +30,6 @@ const mingw = @import("libs/mingw.zig");
const libunwind = @import("libs/libunwind.zig");
const libcxx = @import("libs/libcxx.zig");
const wasi_libc = @import("libs/wasi_libc.zig");
-const fatal = @import("main.zig").fatal;
const clangMain = @import("main.zig").clangMain;
const Zcu = @import("Zcu.zig");
const Sema = @import("Sema.zig");
@@ -43,7 +44,6 @@ const LlvmObject = @import("codegen/llvm.zig").Object;
const dev = @import("dev.zig");
const ThreadSafeQueue = @import("ThreadSafeQueue.zig").ThreadSafeQueue;
-pub const Directory = Cache.Directory;
pub const Config = @import("Compilation/Config.zig");
/// General-purpose allocator. Used for both temporary and long-term storage.
@@ -75,9 +75,9 @@ bin_file: ?*link.File,
/// The root path for the dynamic linker and system libraries (as well as frameworks on Darwin)
sysroot: ?[]const u8,
/// This is `null` when not building a Windows DLL, or when `-fno-emit-implib` is used.
-implib_emit: ?Path,
+implib_emit: ?Cache.Path,
/// This is non-null when `-femit-docs` is provided.
-docs_emit: ?Path,
+docs_emit: ?Cache.Path,
root_name: [:0]const u8,
compiler_rt_strat: RtStrat,
ubsan_rt_strat: RtStrat,
@@ -152,11 +152,6 @@ win32_resource_work_queue: if (dev.env.supports(.win32_resource)) std.fifo.Linea
pub fn deinit(_: @This()) void {}
},
-/// These jobs are to tokenize, parse, and astgen files, which may be outdated
-/// since the last compilation, as well as scan for `@import` and queue up
-/// additional jobs corresponding to those new files.
-astgen_work_queue: std.fifo.LinearFifo(Zcu.File.Index, .Dynamic),
-
/// The ErrorMsg memory is owned by the `CObject`, using Compilation's general purpose allocator.
/// This data is accessed by multiple threads and is protected by `mutex`.
failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *CObject.Diag.Bundle) = .empty,
@@ -207,9 +202,8 @@ cache_parent: *Cache,
parent_whole_cache: ?ParentWholeCache,
/// Path to own executable for invoking `zig clang`.
self_exe_path: ?[]const u8,
-zig_lib_directory: Directory,
-local_cache_directory: Directory,
-global_cache_directory: Directory,
+/// Owned by the caller of `Compilation.create`.
+dirs: Directories,
libc_include_dir_list: []const []const u8,
libc_framework_dir_list: []const []const u8,
rc_includes: RcIncludes,
@@ -293,7 +287,6 @@ const QueuedJobs = struct {
ubsan_rt_lib: bool = false,
ubsan_rt_obj: bool = false,
fuzzer_lib: bool = false,
- update_builtin_zig: bool,
musl_crt_file: [@typeInfo(musl.CrtFile).@"enum".fields.len]bool = @splat(false),
glibc_crt_file: [@typeInfo(glibc.CrtFile).@"enum".fields.len]bool = @splat(false),
freebsd_crt_file: [@typeInfo(freebsd.CrtFile).@"enum".fields.len]bool = @splat(false),
@@ -312,12 +305,466 @@ const QueuedJobs = struct {
zigc_lib: bool = false,
};
+/// A filesystem path, represented relative to one of a few specific directories where possible.
+/// Every path (considering symlinks as distinct paths) has a canonical representation in this form.
+/// This abstraction allows us to:
+/// * always open files relative to a consistent root on the filesystem
+/// * detect when two paths correspond to the same file, e.g. for deduplicating `@import`s
+pub const Path = struct {
+ root: Root,
+ /// This path is always in a normalized form, where:
+ /// * All components are separated by `fs.path.sep`
+ /// * There are no repeated separators (like "foo//bar")
+ /// * There are no "." or ".." components
+ /// * There is no trailing path separator
+ ///
+ /// There is a leading separator iff `root` is `.none` *and* `builtin.target.os.tag != .wasi`.
+ ///
+ /// If this `Path` exactly represents a `Root`, the sub path is "", not ".".
+ sub_path: []u8,
+
+ const Root = enum {
+ /// `sub_path` is relative to the Zig lib directory on `Compilation`.
+ zig_lib,
+ /// `sub_path` is relative to the global cache directory on `Compilation`.
+ global_cache,
+ /// `sub_path` is relative to the local cache directory on `Compilation`.
+ local_cache,
+ /// `sub_path` is not relative to any of the roots listed above.
+ /// It is resolved starting with `Directories.cwd`; so it is an absolute path on most
+ /// targets, but cwd-relative on WASI. We do not make it cwd-relative on other targets
+ /// so that `Path.digest` gives hashes which can be stored in the Zig cache (as they
+ /// don't depend on a specific compiler instance).
+ none,
+ };
+
+ /// In general, we can only construct canonical `Path`s at runtime, because weird nesting might
+ /// mean that e.g. a sub path inside zig/lib/ is actually in the global cache. However, because
+ /// `Directories` guarantees that `zig_lib` is a distinct path from both cache directories, it's
+ /// okay for us to construct this path, and only this path, as a comptime constant.
+ pub const zig_lib_root: Path = .{ .root = .zig_lib, .sub_path = "" };
+
+ pub fn deinit(p: Path, gpa: Allocator) void {
+ gpa.free(p.sub_path);
+ }
+
+ /// The returned digest is relocatable across any compiler process using the same lib and cache
+ /// directories; it does not depend on cwd.
+ pub fn digest(p: Path) Cache.BinDigest {
+ var h = Cache.hasher_init;
+ h.update(&.{@intFromEnum(p.root)});
+ h.update(p.sub_path);
+ return h.finalResult();
+ }
+
+ /// Given a `Path`, returns the directory handle and sub path to be used to open the path.
+ pub fn openInfo(p: Path, dirs: Directories) struct { fs.Dir, []const u8 } {
+ const dir = switch (p.root) {
+ .none => {
+ const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd);
+ return .{ fs.cwd(), cwd_sub_path };
+ },
+ .zig_lib => dirs.zig_lib.handle,
+ .global_cache => dirs.global_cache.handle,
+ .local_cache => dirs.local_cache.handle,
+ };
+ if (p.sub_path.len == 0) return .{ dir, "." };
+ assert(!fs.path.isAbsolute(p.sub_path));
+ return .{ dir, p.sub_path };
+ }
+
+ pub const format = unreachable; // do not format directly
+ pub fn fmt(p: Path, comp: *Compilation) Formatter {
+ return .{ .p = p, .comp = comp };
+ }
+ const Formatter = struct {
+ p: Path,
+ comp: *Compilation,
+ pub fn format(f: Formatter, comptime unused_fmt: []const u8, options: std.fmt.FormatOptions, w: anytype) !void {
+ comptime assert(unused_fmt.len == 0);
+ _ = options;
+ const root_path: []const u8 = switch (f.p.root) {
+ .zig_lib => f.comp.dirs.zig_lib.path orelse ".",
+ .global_cache => f.comp.dirs.global_cache.path orelse ".",
+ .local_cache => f.comp.dirs.local_cache.path orelse ".",
+ .none => {
+ const cwd_sub_path = absToCwdRelative(f.p.sub_path, f.comp.dirs.cwd);
+ try w.writeAll(cwd_sub_path);
+ return;
+ },
+ };
+ assert(root_path.len != 0);
+ try w.writeAll(root_path);
+ if (f.p.sub_path.len > 0) {
+ try w.writeByte(fs.path.sep);
+ try w.writeAll(f.p.sub_path);
+ }
+ }
+ };
+
+ /// Given the `sub_path` of a `Path` with `Path.root == .none`, attempts to convert
+ /// the (absolute) path to a cwd-relative path. Otherwise, returns the absolute path
+ /// unmodified. The returned string is never empty: "" is converted to ".".
+ fn absToCwdRelative(sub_path: []const u8, cwd_path: []const u8) []const u8 {
+ if (builtin.target.os.tag == .wasi) {
+ if (sub_path.len == 0) return ".";
+ assert(!fs.path.isAbsolute(sub_path));
+ return sub_path;
+ }
+ assert(fs.path.isAbsolute(sub_path));
+ if (!std.mem.startsWith(u8, sub_path, cwd_path)) return sub_path;
+ if (sub_path.len == cwd_path.len) return "."; // the strings are equal
+ if (sub_path[cwd_path.len] != fs.path.sep) return sub_path; // only a string-prefix match (e.g. '/foo/barbaz' vs cwd '/foo/bar'), not actually inside cwd
+ return sub_path[cwd_path.len + 1 ..]; // remove '/path/to/cwd/' prefix
+ }
+
+ /// From an unresolved path (which can be made of multiple not-yet-joined strings), construct a
+ /// canonical `Path`.
+ pub fn fromUnresolved(gpa: Allocator, dirs: Compilation.Directories, unresolved_parts: []const []const u8) Allocator.Error!Path {
+ const resolved = try introspect.resolvePath(gpa, dirs.cwd, unresolved_parts);
+ errdefer gpa.free(resolved);
+
+ // If, for instance, `dirs.local_cache.path` is within the lib dir, it must take priority,
+ // so that we prefer `.root = .local_cache` over `.root = .zig_lib`. The easiest way to do
+ // this is simply to prioritize the longest root path.
+ const PathAndRoot = struct { ?[]const u8, Root };
+ var roots: [3]PathAndRoot = .{
+ .{ dirs.zig_lib.path, .zig_lib },
+ .{ dirs.global_cache.path, .global_cache },
+ .{ dirs.local_cache.path, .local_cache },
+ };
+ // This must be a stable sort, because the global and local cache directories may be the same, in
+ // which case we need to make a consistent choice.
+ std.mem.sort(PathAndRoot, &roots, {}, struct {
+ fn lessThan(_: void, lhs: PathAndRoot, rhs: PathAndRoot) bool {
+ const lhs_path_len = if (lhs[0]) |p| p.len else 0;
+ const rhs_path_len = if (rhs[0]) |p| p.len else 0;
+ return lhs_path_len > rhs_path_len; // '>' instead of '<' to sort descending
+ }
+ }.lessThan);
+
+ for (roots) |path_and_root| {
+ const opt_root_path, const root = path_and_root;
+ const root_path = opt_root_path orelse {
+ // This root is the cwd.
+ if (!fs.path.isAbsolute(resolved)) {
+ return .{
+ .root = root,
+ .sub_path = resolved,
+ };
+ }
+ continue;
+ };
+ if (!mem.startsWith(u8, resolved, root_path)) continue;
+ const sub: []const u8 = if (resolved.len != root_path.len) sub: {
+ // Check the trailing slash, so that we don't match e.g. `/foo/bar` with `/foo/barren`
+ if (resolved[root_path.len] != fs.path.sep) continue;
+ break :sub resolved[root_path.len + 1 ..];
+ } else "";
+ const duped = try gpa.dupe(u8, sub);
+ gpa.free(resolved);
+ return .{ .root = root, .sub_path = duped };
+ }
+
+ // We're not relative to any root, so we will use an absolute path (on targets where they are available).
+
+ if (builtin.target.os.tag == .wasi or fs.path.isAbsolute(resolved)) {
+ // `resolved` is already absolute (or we're on WASI, where absolute paths don't really exist).
+ return .{ .root = .none, .sub_path = resolved };
+ }
+
+ if (resolved.len == 0) {
+ // We just need the cwd path, no trailing separator. Note that `gpa.free(resolved)` would be a nop.
+ return .{ .root = .none, .sub_path = try gpa.dupe(u8, dirs.cwd) };
+ }
+
+ // We need to make an absolute path. Because `resolved` came from `introspect.resolvePath`, we can just
+ // join the paths with a simple format string.
+ const abs_path = try std.fmt.allocPrint(gpa, "{s}{c}{s}", .{ dirs.cwd, fs.path.sep, resolved });
+ gpa.free(resolved);
+ return .{ .root = .none, .sub_path = abs_path };
+ }
+
+ /// Constructs a canonical `Path` representing `sub_path` relative to `root`.
+ ///
+ /// If `sub_path` is resolved, this is almost like directly constructing a `Path`, but this
+ /// function also canonicalizes the result, which matters because `sub_path` may move us into
+ /// a different root.
+ ///
+ /// For instance, if the Zig lib directory is inside the global cache, passing `root` as
+ /// `.global_cache` could still end up returning a `Path` with `Path.root == .zig_lib`.
+ pub fn fromRoot(
+ gpa: Allocator,
+ dirs: Compilation.Directories,
+ root: Path.Root,
+ sub_path: []const u8,
+ ) Allocator.Error!Path {
+ // Currently, this just wraps `fromUnresolved` for simplicity. A more efficient impl is
+ // probably possible if this function ever ends up impacting performance somehow.
+ return .fromUnresolved(gpa, dirs, &.{
+ switch (root) {
+ .zig_lib => dirs.zig_lib.path orelse "",
+ .global_cache => dirs.global_cache.path orelse "",
+ .local_cache => dirs.local_cache.path orelse "",
+ .none => "",
+ },
+ sub_path,
+ });
+ }
+
+ /// Given a `Path` and an (unresolved) sub path relative to it, construct a `Path` representing
+ /// the joined path `p/sub_path`. Note that, like with `fromRoot`, the `sub_path` might cause us
+ /// to move into a different `Path.Root`.
+ pub fn join(
+ p: Path,
+ gpa: Allocator,
+ dirs: Compilation.Directories,
+ sub_path: []const u8,
+ ) Allocator.Error!Path {
+ // Currently, this just wraps `fromUnresolved` for simplicity. A more efficient impl is
+ // probably possible if this function ever ends up impacting performance somehow.
+ return .fromUnresolved(gpa, dirs, &.{
+ switch (p.root) {
+ .zig_lib => dirs.zig_lib.path orelse "",
+ .global_cache => dirs.global_cache.path orelse "",
+ .local_cache => dirs.local_cache.path orelse "",
+ .none => "",
+ },
+ p.sub_path,
+ sub_path,
+ });
+ }
+
+ /// Like `join`, but `sub_path` is relative to the dirname of `p` instead of `p` itself.
+ pub fn upJoin(
+ p: Path,
+ gpa: Allocator,
+ dirs: Compilation.Directories,
+ sub_path: []const u8,
+ ) Allocator.Error!Path {
+ return .fromUnresolved(gpa, dirs, &.{
+ switch (p.root) {
+ .zig_lib => dirs.zig_lib.path orelse "",
+ .global_cache => dirs.global_cache.path orelse "",
+ .local_cache => dirs.local_cache.path orelse "",
+ .none => "",
+ },
+ p.sub_path,
+ "..",
+ sub_path,
+ });
+ }
+
+ pub fn toCachePath(p: Path, dirs: Directories) Cache.Path {
+ const root_dir: Cache.Directory = switch (p.root) {
+ .zig_lib => dirs.zig_lib,
+ .global_cache => dirs.global_cache,
+ .local_cache => dirs.local_cache,
+ else => {
+ const cwd_sub_path = absToCwdRelative(p.sub_path, dirs.cwd);
+ return .{
+ .root_dir = .cwd(),
+ .sub_path = cwd_sub_path,
+ };
+ },
+ };
+ assert(!fs.path.isAbsolute(p.sub_path));
+ return .{
+ .root_dir = root_dir,
+ .sub_path = p.sub_path,
+ };
+ }
+
+ /// This should not be used for most of the compiler pipeline, but is useful when emitting
+ /// paths from the compilation (e.g. in debug info), because they will not depend on the cwd.
+ /// The returned path is owned by the caller and allocated into `gpa`.
+ pub fn toAbsolute(p: Path, dirs: Directories, gpa: Allocator) Allocator.Error![]u8 {
+ const root_path: []const u8 = switch (p.root) {
+ .zig_lib => dirs.zig_lib.path orelse "",
+ .global_cache => dirs.global_cache.path orelse "",
+ .local_cache => dirs.local_cache.path orelse "",
+ .none => "",
+ };
+ return fs.path.resolve(gpa, &.{
+ dirs.cwd,
+ root_path,
+ p.sub_path,
+ });
+ }
+
+ pub fn isNested(inner: Path, outer: Path) union(enum) {
+ /// Value is the sub path, which is a sub-slice of `inner.sub_path`.
+ yes: []const u8,
+ no,
+ different_roots,
+ } {
+ if (inner.root != outer.root) return .different_roots;
+ if (!mem.startsWith(u8, inner.sub_path, outer.sub_path)) return .no;
+ if (inner.sub_path.len == outer.sub_path.len) return .no;
+ if (outer.sub_path.len == 0) return .{ .yes = inner.sub_path };
+ if (inner.sub_path[outer.sub_path.len] != fs.path.sep) return .no;
+ return .{ .yes = inner.sub_path[outer.sub_path.len + 1 ..] };
+ }
+
+ /// Returns whether this `Path` is illegal to have as a user-imported `Zcu.File` (including
+ /// as the root of a module). Such paths exist in directories which the Zig compiler treats
+ /// specially, like 'global_cache/b/', which stores 'builtin.zig' files.
+ pub fn isIllegalZigImport(p: Path, gpa: Allocator, dirs: Directories) Allocator.Error!bool {
+ const zig_builtin_dir: Path = try .fromRoot(gpa, dirs, .global_cache, "b");
+ defer zig_builtin_dir.deinit(gpa);
+ return switch (p.isNested(zig_builtin_dir)) {
+ .yes => true,
+ .no, .different_roots => false,
+ };
+ }
+};
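// Editorial usage sketch, not part of the diff. Assumes `gpa: Allocator`, a populated
// `dirs: Compilation.Directories`, and `comp: *Compilation` whose `comp.dirs == dirs`;
// the "src"/"main.zig" components are purely illustrative.
const path: Compilation.Path = try .fromUnresolved(gpa, dirs, &.{ "src", "main.zig" });
defer path.deinit(gpa);
const dir, const sub_path = path.openInfo(dirs);
const file = try dir.openFile(sub_path, .{});
defer file.close();
std.log.debug("opened '{}'", .{path.fmt(comp)});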
+
+pub const Directories = struct {
+ /// The string returned by `introspect.getResolvedCwd`. This is typically an absolute path,
+ /// but on WASI is the empty string "" instead, because WASI does not have absolute paths.
+ cwd: []const u8,
+ /// The Zig 'lib' directory.
+ /// `zig_lib.path` is resolved (`introspect.resolvePath`) or `null` for cwd.
+ /// Guaranteed to be a different path from `global_cache` and `local_cache`.
+ zig_lib: Cache.Directory,
+ /// The global Zig cache directory.
+ /// `global_cache.path` is resolved (`introspect.resolvePath`) or `null` for cwd.
+ global_cache: Cache.Directory,
+ /// The local Zig cache directory.
+ /// `local_cache.path` is resolved (`introspect.resolvePath`) or `null` for cwd.
+ /// This may be the same as `global_cache`.
+ local_cache: Cache.Directory,
+
+ pub fn deinit(dirs: *Directories) void {
+ // The local and global caches could be the same.
+ const close_local = dirs.local_cache.handle.fd != dirs.global_cache.handle.fd;
+
+ dirs.global_cache.handle.close();
+ if (close_local) dirs.local_cache.handle.close();
+ dirs.zig_lib.handle.close();
+ }
+
+ /// Returns a `Directories` where `local_cache` is replaced with `global_cache`, intended for
+ /// use by sub-compilations (e.g. compiler_rt). Do not `deinit` the returned `Directories`; it
+ /// shares handles with `dirs`.
+ pub fn withoutLocalCache(dirs: Directories) Directories {
+ return .{
+ .cwd = dirs.cwd,
+ .zig_lib = dirs.zig_lib,
+ .global_cache = dirs.global_cache,
+ .local_cache = dirs.global_cache,
+ };
+ }
+
+ /// Uses `std.process.fatal` on error conditions.
+ pub fn init(
+ arena: Allocator,
+ override_zig_lib: ?[]const u8,
+ override_global_cache: ?[]const u8,
+ local_cache_strat: union(enum) {
+ override: []const u8,
+ search,
+ global,
+ },
+ wasi_preopens: switch (builtin.target.os.tag) {
+ .wasi => std.fs.wasi.Preopens,
+ else => void,
+ },
+ self_exe_path: switch (builtin.target.os.tag) {
+ .wasi => void,
+ else => []const u8,
+ },
+ ) Directories {
+ const wasi = builtin.target.os.tag == .wasi;
+
+ const cwd = introspect.getResolvedCwd(arena) catch |err| {
+ fatal("unable to get cwd: {s}", .{@errorName(err)});
+ };
+
+ const zig_lib: Cache.Directory = d: {
+ if (override_zig_lib) |path| break :d openUnresolved(arena, cwd, path, .@"zig lib");
+ if (wasi) break :d openWasiPreopen(wasi_preopens, "/lib");
+ break :d introspect.findZigLibDirFromSelfExe(arena, cwd, self_exe_path) catch |err| {
+ fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
+ };
+ };
+
+ const global_cache: Cache.Directory = d: {
+ if (override_global_cache) |path| break :d openUnresolved(arena, cwd, path, .@"global cache");
+ if (wasi) break :d openWasiPreopen(wasi_preopens, "/cache");
+ const path = introspect.resolveGlobalCacheDir(arena) catch |err| {
+ fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+ };
+ break :d openUnresolved(arena, cwd, path, .@"global cache");
+ };
+
+ const local_cache: Cache.Directory = switch (local_cache_strat) {
+ .override => |path| openUnresolved(arena, cwd, path, .@"local cache"),
+ .search => d: {
+ const maybe_path = introspect.resolveSuitableLocalCacheDir(arena, cwd) catch |err| {
+ fatal("unable to resolve zig cache directory: {s}", .{@errorName(err)});
+ };
+ const path = maybe_path orelse break :d global_cache;
+ break :d openUnresolved(arena, cwd, path, .@"local cache");
+ },
+ .global => global_cache,
+ };
+
+ if (std.mem.eql(u8, zig_lib.path orelse "", global_cache.path orelse "")) {
+ fatal("zig lib directory '{}' cannot be equal to global cache directory '{}'", .{ zig_lib, global_cache });
+ }
+ if (std.mem.eql(u8, zig_lib.path orelse "", local_cache.path orelse "")) {
+ fatal("zig lib directory '{}' cannot be equal to local cache directory '{}'", .{ zig_lib, local_cache });
+ }
+
+ return .{
+ .cwd = cwd,
+ .zig_lib = zig_lib,
+ .global_cache = global_cache,
+ .local_cache = local_cache,
+ };
+ }
+ fn openWasiPreopen(preopens: std.fs.wasi.Preopens, name: []const u8) Cache.Directory {
+ return .{
+ .path = if (std.mem.eql(u8, name, ".")) null else name,
+ .handle = .{
+ .fd = preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}),
+ },
+ };
+ }
+ fn openUnresolved(arena: Allocator, cwd: []const u8, unresolved_path: []const u8, thing: enum { @"zig lib", @"global cache", @"local cache" }) Cache.Directory {
+ const path = introspect.resolvePath(arena, cwd, &.{unresolved_path}) catch |err| {
+ fatal("unable to resolve {s} directory: {s}", .{ @tagName(thing), @errorName(err) });
+ };
+ const nonempty_path = if (path.len == 0) "." else path;
+ const handle_or_err = switch (thing) {
+ .@"zig lib" => std.fs.cwd().openDir(nonempty_path, .{}),
+ .@"global cache", .@"local cache" => std.fs.cwd().makeOpenPath(nonempty_path, .{}),
+ };
+ return .{
+ .path = if (path.len == 0) null else path,
+ .handle = handle_or_err catch |err| {
+ const extra_str: []const u8 = e: {
+ if (thing == .@"global cache") switch (err) {
+ error.AccessDenied, error.ReadOnlyFileSystem => break :e "\n" ++
+ "If this location is not writable then consider specifying an alternative with " ++
+ "the ZIG_GLOBAL_CACHE_DIR environment variable or the --global-cache-dir option.",
+ else => {},
+ };
+ break :e "";
+ };
+ fatal("unable to open {s} directory '{s}': {s}{s}", .{ @tagName(thing), nonempty_path, @errorName(err), extra_str });
+ },
+ };
+ }
+};
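// Editorial usage sketch, not part of the diff. Assumes a non-WASI host, `arena: Allocator`,
// and `self_exe_path: []const u8`; any failure calls `std.process.fatal` rather than returning.
var dirs: Compilation.Directories = .init(arena, null, null, .search, {}, self_exe_path);
defer dirs.deinit();
// Sub-compilations (e.g. compiler_rt) reuse the same handles with the local cache dropped:
const sub_dirs = dirs.withoutLocalCache(); // do not deinit; shares handles with `dirs`
_ = sub_dirs;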
+
pub const default_stack_protector_buffer_size = target_util.default_stack_protector_buffer_size;
pub const SemaError = Zcu.SemaError;
pub const CrtFile = struct {
lock: Cache.Lock,
- full_object_path: Path,
+ full_object_path: Cache.Path,
pub fn isObject(cf: CrtFile) bool {
return switch (classifyFileExt(cf.full_object_path.sub_path)) {
@@ -430,7 +877,7 @@ pub const CObject = struct {
new,
success: struct {
/// The outputted result. `sub_path` owned by gpa.
- object_path: Path,
+ object_path: Cache.Path,
/// This is a file system lock on the cache hash manifest representing this
/// object. It prevents other invocations of the Zig compiler from interfering
/// with this object until released.
@@ -854,7 +1301,7 @@ pub const MiscError = struct {
pub const EmitLoc = struct {
/// If this is `null` it means the file will be output to the cache directory.
/// When provided, both the open file handle and the path name must outlive the `Compilation`.
- directory: ?Compilation.Directory,
+ directory: ?Cache.Directory,
/// This may not have sub-directories in it.
basename: []const u8,
};
@@ -977,7 +1424,7 @@ const CacheUse = union(CacheMode) {
implib_sub_path: ?[]u8,
docs_sub_path: ?[]u8,
lf_open_opts: link.File.OpenOptions,
- tmp_artifact_directory: ?Directory,
+ tmp_artifact_directory: ?Cache.Directory,
/// Prevents other processes from clobbering files in the output directory.
lock: ?Cache.Lock,
@@ -997,7 +1444,7 @@ const CacheUse = union(CacheMode) {
const Incremental = struct {
/// Where build artifacts and incremental compilation metadata serialization go.
- artifact_directory: Compilation.Directory,
+ artifact_directory: Cache.Directory,
};
fn deinit(cu: CacheUse) void {
@@ -1013,9 +1460,7 @@ const CacheUse = union(CacheMode) {
};
pub const CreateOptions = struct {
- zig_lib_directory: Directory,
- local_cache_directory: Directory,
- global_cache_directory: Directory,
+ dirs: Directories,
thread_pool: *ThreadPool,
self_exe_path: ?[]const u8 = null,
@@ -1059,7 +1504,7 @@ pub const CreateOptions = struct {
/// This field is intended to be removed.
/// The ELF implementation no longer uses this data, however the MachO and COFF
/// implementations still do.
- lib_directories: []const Directory = &.{},
+ lib_directories: []const Cache.Directory = &.{},
rpath_list: []const []const u8 = &[0][]const u8{},
symbol_wrap_set: std.StringArrayHashMapUnmanaged(void) = .empty,
c_source_files: []const CSourceFile = &.{},
@@ -1195,68 +1640,35 @@ pub const CreateOptions = struct {
};
fn addModuleTableToCacheHash(
- gpa: Allocator,
+ zcu: *Zcu,
arena: Allocator,
hash: *Cache.HashHelper,
- root_mod: *Package.Module,
- main_mod: *Package.Module,
hash_type: union(enum) { path_bytes, files: *Cache.Manifest },
-) (error{OutOfMemory} || std.process.GetCwdError)!void {
- var seen_table: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .empty;
- defer seen_table.deinit(gpa);
-
- // root_mod and main_mod may be the same pointer. In fact they usually are.
- // However in the case of `zig test` or `zig build` they will be different,
- // and it's possible for one to not reference the other via the import table.
- try seen_table.put(gpa, root_mod, {});
- try seen_table.put(gpa, main_mod, {});
-
- const SortByName = struct {
- has_builtin: bool,
- names: []const []const u8,
-
- pub fn lessThan(ctx: @This(), lhs: usize, rhs: usize) bool {
- return if (ctx.has_builtin and (lhs == 0 or rhs == 0))
- lhs < rhs
- else
- mem.lessThan(u8, ctx.names[lhs], ctx.names[rhs]);
+) error{
+ OutOfMemory,
+ Unexpected,
+ CurrentWorkingDirectoryUnlinked,
+}!void {
+ assert(zcu.module_roots.count() != 0); // module_roots is populated
+
+ for (zcu.module_roots.keys(), zcu.module_roots.values()) |mod, opt_mod_root_file| {
+ if (mod == zcu.std_mod) continue; // redundant
+ if (opt_mod_root_file.unwrap()) |mod_root_file| {
+ if (zcu.fileByIndex(mod_root_file).is_builtin) continue; // redundant
}
- };
-
- var i: usize = 0;
- while (i < seen_table.count()) : (i += 1) {
- const mod = seen_table.keys()[i];
- if (mod.isBuiltin()) {
- // Skip builtin.zig; it is useless as an input, and we don't want to
- // have to write it before checking for a cache hit.
- continue;
- }
-
cache_helpers.addModule(hash, mod);
-
switch (hash_type) {
.path_bytes => {
- hash.addBytes(mod.root_src_path);
- hash.addOptionalBytes(mod.root.root_dir.path);
+ hash.add(mod.root.root);
hash.addBytes(mod.root.sub_path);
+ hash.addBytes(mod.root_src_path);
},
.files => |man| if (mod.root_src_path.len != 0) {
- const pkg_zig_file = try mod.root.joinString(arena, mod.root_src_path);
- _ = try man.addFile(pkg_zig_file, null);
+ const root_src_path = try mod.root.toCachePath(zcu.comp.dirs).join(arena, mod.root_src_path);
+ _ = try man.addFilePath(root_src_path, null);
},
}
-
- mod.deps.sortUnstable(SortByName{
- .has_builtin = mod.deps.count() >= 1 and
- mod.deps.values()[0].isBuiltin(),
- .names = mod.deps.keys(),
- });
-
hash.addListOfBytes(mod.deps.keys());
-
- const deps = mod.deps.values();
- try seen_table.ensureUnusedCapacity(gpa, deps.len);
- for (deps) |dep| seen_table.putAssumeCapacity(dep, {});
}
}
@@ -1310,7 +1722,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const libc_dirs = try std.zig.LibCDirs.detect(
arena,
- options.zig_lib_directory.path.?,
+ options.dirs.zig_lib.path.?,
options.root_mod.resolved_target.result,
options.root_mod.resolved_target.is_native_abi,
link_libc,
@@ -1332,11 +1744,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// For objects, this mechanism relies on essentially `_ = @import("compiler-rt");`
// injected into the object.
const compiler_rt_mod = try Package.Module.create(arena, .{
- .global_cache_directory = options.global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = options.zig_lib_directory,
- },
+ .root = .zig_lib_root,
.root_src_path = "compiler_rt.zig",
},
.fully_qualified_name = "compiler_rt",
@@ -1348,8 +1757,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
},
.global = options.config,
.parent = options.root_mod,
- .builtin_mod = options.root_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is set
});
try options.root_mod.deps.putNoClobber(arena, "compiler_rt", compiler_rt_mod);
}
@@ -1369,11 +1776,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (ubsan_rt_strat == .zcu) {
const ubsan_rt_mod = try Package.Module.create(arena, .{
- .global_cache_directory = options.global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = options.zig_lib_directory,
- },
+ .root = .zig_lib_root,
.root_src_path = "ubsan_rt.zig",
},
.fully_qualified_name = "ubsan_rt",
@@ -1381,8 +1785,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.inherited = .{},
.global = options.config,
.parent = options.root_mod,
- .builtin_mod = options.root_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is set
});
try options.root_mod.deps.putNoClobber(arena, "ubsan_rt", ubsan_rt_mod);
}
@@ -1415,13 +1817,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const cache = try arena.create(Cache);
cache.* = .{
.gpa = gpa,
- .manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}),
+ .manifest_dir = try options.dirs.local_cache.handle.makeOpenPath("h", .{}),
};
// These correspond to std.zig.Server.Message.PathPrefix.
cache.addPrefix(.{ .path = null, .handle = std.fs.cwd() });
- cache.addPrefix(options.zig_lib_directory);
- cache.addPrefix(options.local_cache_directory);
- cache.addPrefix(options.global_cache_directory);
+ cache.addPrefix(options.dirs.zig_lib);
+ cache.addPrefix(options.dirs.local_cache);
+ cache.addPrefix(options.dirs.global_cache);
errdefer cache.manifest_dir.close();
// This is shared hasher state common to zig source and all C source files.
@@ -1458,26 +1860,22 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// to redundantly happen for each AstGen operation.
const zir_sub_dir = "z";
- var local_zir_dir = try options.local_cache_directory.handle.makeOpenPath(zir_sub_dir, .{});
+ var local_zir_dir = try options.dirs.local_cache.handle.makeOpenPath(zir_sub_dir, .{});
errdefer local_zir_dir.close();
- const local_zir_cache: Directory = .{
+ const local_zir_cache: Cache.Directory = .{
.handle = local_zir_dir,
- .path = try options.local_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}),
+ .path = try options.dirs.local_cache.join(arena, &.{zir_sub_dir}),
};
- var global_zir_dir = try options.global_cache_directory.handle.makeOpenPath(zir_sub_dir, .{});
+ var global_zir_dir = try options.dirs.global_cache.handle.makeOpenPath(zir_sub_dir, .{});
errdefer global_zir_dir.close();
- const global_zir_cache: Directory = .{
+ const global_zir_cache: Cache.Directory = .{
.handle = global_zir_dir,
- .path = try options.global_cache_directory.join(arena, &[_][]const u8{zir_sub_dir}),
+ .path = try options.dirs.global_cache.join(arena, &.{zir_sub_dir}),
};
const std_mod = options.std_mod orelse try Package.Module.create(arena, .{
- .global_cache_directory = options.global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = options.zig_lib_directory,
- .sub_path = "std",
- },
+ .root = try .fromRoot(arena, options.dirs, .zig_lib, "std"),
.root_src_path = "std.zig",
},
.fully_qualified_name = "std",
@@ -1485,8 +1883,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.inherited = .{},
.global = options.config,
.parent = options.root_mod,
- .builtin_mod = options.root_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is set
});
const zcu = try arena.create(Zcu);
@@ -1522,16 +1918,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.docs_emit = null, // handled below
.root_mod = options.root_mod,
.config = options.config,
- .zig_lib_directory = options.zig_lib_directory,
- .local_cache_directory = options.local_cache_directory,
- .global_cache_directory = options.global_cache_directory,
+ .dirs = options.dirs,
.emit_asm = options.emit_asm,
.emit_llvm_ir = options.emit_llvm_ir,
.emit_llvm_bc = options.emit_llvm_bc,
.work_queues = @splat(.init(gpa)),
.c_object_work_queue = std.fifo.LinearFifo(*CObject, .Dynamic).init(gpa),
.win32_resource_work_queue = if (dev.env.supports(.win32_resource)) std.fifo.LinearFifo(*Win32Resource, .Dynamic).init(gpa) else .{},
- .astgen_work_queue = std.fifo.LinearFifo(Zcu.File.Index, .Dynamic).init(gpa),
.c_source_files = options.c_source_files,
.rc_source_files = options.rc_source_files,
.cache_parent = cache,
@@ -1572,9 +1965,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.framework_dirs = options.framework_dirs,
.llvm_opt_bisect_limit = options.llvm_opt_bisect_limit,
.skip_linker_dependencies = options.skip_linker_dependencies,
- .queued_jobs = .{
- .update_builtin_zig = have_zcu,
- },
+ .queued_jobs = .{},
.function_sections = options.function_sections,
.data_sections = options.data_sections,
.native_system_include_paths = options.native_system_include_paths,
@@ -1596,6 +1987,13 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
comp.config.any_sanitize_c = any_sanitize_c;
comp.config.any_fuzz = any_fuzz;
+ if (opt_zcu) |zcu| {
+ // Populate `zcu.module_roots`.
+ const pt: Zcu.PerThread = .activate(zcu, .main);
+ defer pt.deactivate();
+ try pt.populateModuleRootTable();
+ }
+
const lf_open_opts: link.File.OpenOptions = .{
.linker_script = options.linker_script,
.z_nodelete = options.linker_z_nodelete,
@@ -1686,7 +2084,11 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// do want to namespace different source file names because they are
// likely different compilations and therefore this would be likely to
// cause cache hits.
- try addModuleTableToCacheHash(gpa, arena, &hash, options.root_mod, main_mod, .path_bytes);
+ if (comp.zcu) |zcu| {
+ try addModuleTableToCacheHash(zcu, arena, &hash, .path_bytes);
+ } else {
+ cache_helpers.addModule(&hash, options.root_mod);
+ }
// In the case of incremental cache mode, this `artifact_directory`
// is computed based on a hash of non-linker inputs, and it is where all
@@ -1695,11 +2097,11 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const digest = hash.final();
const artifact_sub_dir = "o" ++ std.fs.path.sep_str ++ digest;
- var artifact_dir = try options.local_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
+ var artifact_dir = try options.dirs.local_cache.handle.makeOpenPath(artifact_sub_dir, .{});
errdefer artifact_dir.close();
- const artifact_directory: Directory = .{
+ const artifact_directory: Cache.Directory = .{
.handle = artifact_dir,
- .path = try options.local_cache_directory.join(arena, &[_][]const u8{artifact_sub_dir}),
+ .path = try options.dirs.local_cache.join(arena, &.{artifact_sub_dir}),
};
const incremental = try arena.create(CacheUse.Incremental);
@@ -1709,7 +2111,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
comp.cache_use = .{ .incremental = incremental };
if (options.emit_bin) |emit_bin| {
- const emit: Path = .{
+ const emit: Cache.Path = .{
.root_dir = emit_bin.directory orelse artifact_directory,
.sub_path = emit_bin.basename,
};
@@ -1998,10 +2400,10 @@ pub fn destroy(comp: *Compilation) void {
if (comp.bin_file) |lf| lf.destroy();
if (comp.zcu) |zcu| zcu.deinit();
comp.cache_use.deinit();
+
for (comp.work_queues) |work_queue| work_queue.deinit();
comp.c_object_work_queue.deinit();
comp.win32_resource_work_queue.deinit();
- comp.astgen_work_queue.deinit();
comp.windows_libs.deinit(gpa);
@@ -2207,15 +2609,15 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
log.debug("CacheMode.whole cache miss for {s}", .{comp.root_name});
// Compile the artifacts to a temporary directory.
- const tmp_artifact_directory: Directory = d: {
+ const tmp_artifact_directory: Cache.Directory = d: {
const s = std.fs.path.sep_str;
tmp_dir_rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ s ++ std.fmt.hex(tmp_dir_rand_int);
- const path = try comp.local_cache_directory.join(gpa, &.{tmp_dir_sub_path});
+ const path = try comp.dirs.local_cache.join(gpa, &.{tmp_dir_sub_path});
errdefer gpa.free(path);
- const handle = try comp.local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
+ const handle = try comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
errdefer handle.close();
break :d .{
@@ -2243,7 +2645,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
if (whole.bin_sub_path) |sub_path| {
- const emit: Path = .{
+ const emit: Cache.Path = .{
.root_dir = tmp_artifact_directory,
.sub_path = std.fs.path.basename(sub_path),
};
@@ -2265,26 +2667,22 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each C object.
try comp.c_object_work_queue.ensureUnusedCapacity(comp.c_object_table.count());
- for (comp.c_object_table.keys()) |key| {
- comp.c_object_work_queue.writeItemAssumeCapacity(key);
- }
- if (comp.file_system_inputs) |fsi| {
- for (comp.c_object_table.keys()) |c_object| {
- try comp.appendFileSystemInput(fsi, Cache.Path.cwd(), c_object.src.src_path);
- }
+ for (comp.c_object_table.keys()) |c_object| {
+ comp.c_object_work_queue.writeItemAssumeCapacity(c_object);
+ try comp.appendFileSystemInput(try .fromUnresolved(arena, comp.dirs, &.{c_object.src.src_path}));
}
// For compiling Win32 resources, we rely on the cache hash system to avoid duplicating work.
// Add a Job for each Win32 resource file.
try comp.win32_resource_work_queue.ensureUnusedCapacity(comp.win32_resource_table.count());
- for (comp.win32_resource_table.keys()) |key| {
- comp.win32_resource_work_queue.writeItemAssumeCapacity(key);
- }
- if (comp.file_system_inputs) |fsi| {
- for (comp.win32_resource_table.keys()) |win32_resource| switch (win32_resource.src) {
- .rc => |f| try comp.appendFileSystemInput(fsi, Cache.Path.cwd(), f.src_path),
- .manifest => continue,
- };
+ for (comp.win32_resource_table.keys()) |win32_resource| {
+ comp.win32_resource_work_queue.writeItemAssumeCapacity(win32_resource);
+ switch (win32_resource.src) {
+ .rc => |f| {
+ try comp.appendFileSystemInput(try .fromUnresolved(arena, comp.dirs, &.{f.src_path}));
+ },
+ .manifest => {},
+ }
}
if (comp.zcu) |zcu| {
@@ -2293,69 +2691,26 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
zcu.skip_analysis_this_update = false;
- // Make sure std.zig is inside the import_table. We unconditionally need
- // it for start.zig.
- const std_mod = zcu.std_mod;
- _ = try pt.importPkg(std_mod);
-
- // Normally we rely on importing std to in turn import the root source file
- // in the start code, but when using the stage1 backend that won't happen,
- // so in order to run AstGen on the root source file we put it into the
- // import_table here.
- // Likewise, in the case of `zig test`, the test runner is the root source file,
- // and so there is nothing to import the main file.
- if (comp.config.is_test) {
- _ = try pt.importPkg(zcu.main_mod);
- }
-
- if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
- _ = try pt.importPkg(ubsan_rt_mod);
- }
-
- if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
- _ = try pt.importPkg(compiler_rt_mod);
- }
-
- // Put a work item in for every known source file to detect if
- // it changed, and, if so, re-compute ZIR and then queue the job
- // to update it.
- try comp.astgen_work_queue.ensureUnusedCapacity(zcu.import_table.count());
- for (zcu.import_table.values()) |file_index| {
- if (zcu.fileByIndex(file_index).mod.isBuiltin()) continue;
- comp.astgen_work_queue.writeItemAssumeCapacity(file_index);
- }
- if (comp.file_system_inputs) |fsi| {
- for (zcu.import_table.values()) |file_index| {
- const file = zcu.fileByIndex(file_index);
- try comp.appendFileSystemInput(fsi, file.mod.root, file.sub_file_path);
- }
- }
-
- if (comp.file_system_inputs) |fsi| {
- const ip = &zcu.intern_pool;
- for (zcu.embed_table.values()) |embed_file| {
- const sub_file_path = embed_file.sub_file_path.toSlice(ip);
- try comp.appendFileSystemInput(fsi, embed_file.owner.root, sub_file_path);
- }
+ // TODO: doing this in `resolveReferences` later could avoid adding inputs for dead embedfiles. Investigate!
+ for (zcu.embed_table.keys()) |embed_file| {
+ try comp.appendFileSystemInput(embed_file.path);
}
zcu.analysis_roots.clear();
- try comp.queueJob(.{ .analyze_mod = std_mod });
- zcu.analysis_roots.appendAssumeCapacity(std_mod);
+ zcu.analysis_roots.appendAssumeCapacity(zcu.std_mod);
- if (comp.config.is_test and zcu.main_mod != std_mod) {
- try comp.queueJob(.{ .analyze_mod = zcu.main_mod });
+ // Normally we rely on importing std to in turn import the root source file in the start code.
+ // However, the main module is distinct from the root module in tests, so that won't happen there.
+ if (comp.config.is_test and zcu.main_mod != zcu.std_mod) {
zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod);
}
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
- try comp.queueJob(.{ .analyze_mod = compiler_rt_mod });
zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
}
if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
- try comp.queueJob(.{ .analyze_mod = ubsan_rt_mod });
zcu.analysis_roots.appendAssumeCapacity(ubsan_rt_mod);
}
}
@@ -2451,13 +2806,13 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
break :w .no;
};
- renameTmpIntoCache(comp.local_cache_directory, tmp_dir_sub_path, o_sub_path) catch |err| {
+ renameTmpIntoCache(comp.dirs.local_cache, tmp_dir_sub_path, o_sub_path) catch |err| {
return comp.setMiscFailure(
.rename_results,
"failed to rename compilation results ('{}{s}') into local cache ('{}{s}'): {s}",
.{
- comp.local_cache_directory, tmp_dir_sub_path,
- comp.local_cache_directory, o_sub_path,
+ comp.dirs.local_cache, tmp_dir_sub_path,
+ comp.dirs.local_cache, o_sub_path,
@errorName(err),
},
);
@@ -2470,7 +2825,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
// references object file paths.
if (comp.bin_file) |lf| {
lf.emit = .{
- .root_dir = comp.local_cache_directory,
+ .root_dir = comp.dirs.local_cache,
.sub_path = whole.bin_sub_path.?,
};
@@ -2486,7 +2841,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
try flush(comp, arena, .{
- .root_dir = comp.local_cache_directory,
+ .root_dir = comp.dirs.local_cache,
.sub_path = o_sub_path,
}, .main, main_progress_node);
@@ -2515,34 +2870,36 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
}
}
-pub fn appendFileSystemInput(
- comp: *Compilation,
- file_system_inputs: *std.ArrayListUnmanaged(u8),
- root: Cache.Path,
- sub_file_path: []const u8,
-) Allocator.Error!void {
+pub fn appendFileSystemInput(comp: *Compilation, path: Compilation.Path) Allocator.Error!void {
const gpa = comp.gpa;
+ const fsi = comp.file_system_inputs orelse return;
const prefixes = comp.cache_parent.prefixes();
- try file_system_inputs.ensureUnusedCapacity(gpa, root.sub_path.len + sub_file_path.len + 3);
- if (file_system_inputs.items.len > 0) file_system_inputs.appendAssumeCapacity(0);
- for (prefixes, 1..) |prefix_directory, i| {
- if (prefix_directory.eql(root.root_dir)) {
- file_system_inputs.appendAssumeCapacity(@intCast(i));
- if (root.sub_path.len > 0) {
- file_system_inputs.appendSliceAssumeCapacity(root.sub_path);
- file_system_inputs.appendAssumeCapacity(std.fs.path.sep);
- }
- file_system_inputs.appendSliceAssumeCapacity(sub_file_path);
- return;
+
+ const want_prefix_dir: Cache.Directory = switch (path.root) {
+ .zig_lib => comp.dirs.zig_lib,
+ .global_cache => comp.dirs.global_cache,
+ .local_cache => comp.dirs.local_cache,
+ .none => .cwd(),
+ };
+ const prefix: u8 = for (prefixes, 1..) |prefix_dir, i| {
+ if (prefix_dir.eql(want_prefix_dir)) {
+ break @intCast(i);
}
- }
- std.debug.panic("missing prefix directory: {}, {s}", .{ root, sub_file_path });
+ } else std.debug.panic(
+ "missing prefix directory '{s}' ('{}') for '{s}'",
+ .{ @tagName(path.root), want_prefix_dir, path.sub_path },
+ );
+
+ try fsi.ensureUnusedCapacity(gpa, path.sub_path.len + 3);
+ if (fsi.items.len > 0) fsi.appendAssumeCapacity(0);
+ fsi.appendAssumeCapacity(prefix);
+ fsi.appendSliceAssumeCapacity(path.sub_path);
}
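// Editorial note, not part of the diff: each entry appended above is the 1-based index of the
// matching cache prefix (cwd, zig lib, local cache, global cache) as a single byte, followed by
// the path's `sub_path`; successive entries are separated by 0 bytes.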
fn flush(
comp: *Compilation,
arena: Allocator,
- default_artifact_directory: Path,
+ default_artifact_directory: Cache.Path,
tid: Zcu.PerThread.Id,
prog_node: std.Progress.Node,
) !void {
@@ -2574,7 +2931,7 @@ fn flush(
/// implementation at the bottom of this function.
/// This function is only called when CacheMode is `whole`.
fn renameTmpIntoCache(
- cache_directory: Compilation.Directory,
+ cache_directory: Cache.Directory,
tmp_dir_sub_path: []const u8,
o_sub_path: []const u8,
) !void {
@@ -2627,7 +2984,7 @@ fn wholeCacheModeSetBinFilePath(
@memcpy(sub_path[digest_start..][0..digest.len], digest);
comp.implib_emit = .{
- .root_dir = comp.local_cache_directory,
+ .root_dir = comp.dirs.local_cache,
.sub_path = sub_path,
};
}
@@ -2636,7 +2993,7 @@ fn wholeCacheModeSetBinFilePath(
@memcpy(sub_path[digest_start..][0..digest.len], digest);
comp.docs_emit = .{
- .root_dir = comp.local_cache_directory,
+ .root_dir = comp.dirs.local_cache,
.sub_path = sub_path,
};
}
@@ -2661,19 +3018,17 @@ fn addNonIncrementalStuffToCacheManifest(
arena: Allocator,
man: *Cache.Manifest,
) !void {
- const gpa = comp.gpa;
-
comptime assert(link_hash_implementation_version == 14);
- if (comp.zcu) |mod| {
- try addModuleTableToCacheHash(gpa, arena, &man.hash, mod.root_mod, mod.main_mod, .{ .files = man });
+ if (comp.zcu) |zcu| {
+ try addModuleTableToCacheHash(zcu, arena, &man.hash, .{ .files = man });
// Synchronize with other matching comments: ZigOnlyHashStuff
man.hash.addListOfBytes(comp.test_filters);
man.hash.addOptionalBytes(comp.test_name_prefix);
man.hash.add(comp.skip_linker_dependencies);
- //man.hash.add(mod.emit_h != null);
- man.hash.add(mod.error_limit);
+ //man.hash.add(zcu.emit_h != null);
+ man.hash.add(zcu.error_limit);
} else {
cache_helpers.addModule(&man.hash, comp.root_mod);
}
@@ -2839,7 +3194,7 @@ fn emitOthers(comp: *Compilation) void {
pub fn emitLlvmObject(
comp: *Compilation,
arena: Allocator,
- default_artifact_directory: Path,
+ default_artifact_directory: Cache.Path,
bin_emit_loc: ?EmitLoc,
llvm_object: LlvmObject.Ptr,
prog_node: std.Progress.Node,
@@ -2866,7 +3221,7 @@ pub fn emitLlvmObject(
fn resolveEmitLoc(
arena: Allocator,
- default_artifact_directory: Path,
+ default_artifact_directory: Cache.Path,
opt_loc: ?EmitLoc,
) Allocator.Error!?[*:0]const u8 {
const loc = opt_loc orelse return null;
@@ -2877,132 +3232,6 @@ fn resolveEmitLoc(
return slice.ptr;
}
-fn reportMultiModuleErrors(pt: Zcu.PerThread) !void {
- const zcu = pt.zcu;
- const gpa = zcu.gpa;
- const ip = &zcu.intern_pool;
- // Some cases can give you a whole bunch of multi-module errors, which it's not helpful to
- // print all of, so we'll cap the number of these to emit.
- var num_errors: u32 = 0;
- const max_errors = 5;
- // Attach the "some omitted" note to the final error message
- var last_err: ?*Zcu.ErrorMsg = null;
-
- for (zcu.import_table.values()) |file_index| {
- const file = zcu.fileByIndex(file_index);
- if (!file.multi_pkg) continue;
-
- num_errors += 1;
- if (num_errors > max_errors) continue;
-
- const err = err_blk: {
- // Like with errors, let's cap the number of notes to prevent a huge error spew.
- const max_notes = 5;
- const omitted = file.references.items.len -| max_notes;
- const num_notes = file.references.items.len - omitted;
-
- const notes = try gpa.alloc(Zcu.ErrorMsg, if (omitted > 0) num_notes + 1 else num_notes);
- errdefer gpa.free(notes);
-
- for (notes[0..num_notes], file.references.items[0..num_notes], 0..) |*note, ref, i| {
- errdefer for (notes[0..i]) |*n| n.deinit(gpa);
- note.* = switch (ref) {
- .import => |import| try Zcu.ErrorMsg.init(
- gpa,
- .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = import.file,
- .inst = .main_struct_inst,
- }),
- .offset = .{ .token_abs = import.token },
- },
- "imported from module {s}",
- .{zcu.fileByIndex(import.file).mod.fully_qualified_name},
- ),
- .root => |pkg| try Zcu.ErrorMsg.init(
- gpa,
- .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = file_index,
- .inst = .main_struct_inst,
- }),
- .offset = .entire_file,
- },
- "root of module {s}",
- .{pkg.fully_qualified_name},
- ),
- };
- }
- errdefer for (notes[0..num_notes]) |*n| n.deinit(gpa);
-
- if (omitted > 0) {
- notes[num_notes] = try Zcu.ErrorMsg.init(
- gpa,
- .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = file_index,
- .inst = .main_struct_inst,
- }),
- .offset = .entire_file,
- },
- "{} more references omitted",
- .{omitted},
- );
- }
- errdefer if (omitted > 0) notes[num_notes].deinit(gpa);
-
- const err = try Zcu.ErrorMsg.create(
- gpa,
- .{
- .base_node_inst = try ip.trackZir(gpa, pt.tid, .{
- .file = file_index,
- .inst = .main_struct_inst,
- }),
- .offset = .entire_file,
- },
- "file exists in multiple modules",
- .{},
- );
- err.notes = notes;
- break :err_blk err;
- };
- errdefer err.destroy(gpa);
- try zcu.failed_files.putNoClobber(gpa, file, err);
- last_err = err;
- }
-
- // If we omitted any errors, add a note saying that
- if (num_errors > max_errors) {
- const err = last_err.?;
-
- // There isn't really any meaningful place to put this note, so just attach it to the
- // last failed file
- var note = try Zcu.ErrorMsg.init(
- gpa,
- err.src_loc,
- "{} more errors omitted",
- .{num_errors - max_errors},
- );
- errdefer note.deinit(gpa);
-
- const i = err.notes.len;
- err.notes = try gpa.realloc(err.notes, i + 1);
- err.notes[i] = note;
- }
-
- // Now that we've reported the errors, we need to deal with
- // dependencies. Any file referenced by a multi_pkg file should also be
- // marked multi_pkg and have its status set to astgen_failure, as it's
- // ambiguous which package they should be analyzed as a part of. We need
- // to add this flag after reporting the errors however, as otherwise
- // we'd get an error for every single downstream file, which wouldn't be
- // very useful.
- for (zcu.import_table.values()) |file_index| {
- const file = zcu.fileByIndex(file_index);
- if (file.multi_pkg) file.recursiveMarkMultiPkg(pt);
- }
-}
-
/// Having the file open for writing is problematic as far as executing the
/// binary is concerned. This will remove the write flag, or close the file,
/// or whatever is needed so that it can be executed.
@@ -3326,16 +3555,77 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
}
if (comp.zcu) |zcu| zcu_errors: {
- for (zcu.failed_files.keys(), zcu.failed_files.values()) |file, error_msg| {
+ if (zcu.multi_module_err != null) {
+ try zcu.addFileInMultipleModulesError(&bundle);
+ break :zcu_errors;
+ }
+ for (zcu.failed_imports.items) |failed| {
+ assert(zcu.alive_files.contains(failed.file_index)); // otherwise it wouldn't have been added
+ const file = zcu.fileByIndex(failed.file_index);
+ const source = try file.getSource(zcu);
+ const tree = try file.getTree(zcu);
+ const start = tree.tokenStart(failed.import_token);
+ const end = start + tree.tokenSlice(failed.import_token).len;
+ const loc = std.zig.findLineColumn(source.bytes, start);
+ try bundle.addRootErrorMessage(.{
+ .msg = switch (failed.kind) {
+ .file_outside_module_root => try bundle.addString("import of file outside module path"),
+ .illegal_zig_import => try bundle.addString("this compiler implementation does not allow importing files from this directory"),
+ },
+ .src_loc = try bundle.addSourceLocation(.{
+ .src_path = try bundle.printString("{}", .{file.path.fmt(comp)}),
+ .span_start = start,
+ .span_main = start,
+ .span_end = @intCast(end),
+ .line = @intCast(loc.line),
+ .column = @intCast(loc.column),
+ .source_line = try bundle.addString(loc.source_line),
+ }),
+ .notes_len = 0,
+ });
+ }
+
+ // Before iterating `failed_files`, we need to sort it into a consistent order so that error
+ // messages appear consistently despite different ordering from the AstGen worker pool. File
+        // paths are a great key for this sort! We sort the `ArrayHashMap` itself to
+ // make sure it reindexes; that's important because these entries need to be retained for
+ // future updates.
+ const FileSortCtx = struct {
+ zcu: *Zcu,
+ failed_files_keys: []const Zcu.File.Index,
+ pub fn lessThan(ctx: @This(), lhs_index: usize, rhs_index: usize) bool {
+ const lhs_path = ctx.zcu.fileByIndex(ctx.failed_files_keys[lhs_index]).path;
+ const rhs_path = ctx.zcu.fileByIndex(ctx.failed_files_keys[rhs_index]).path;
+ if (lhs_path.root != rhs_path.root) return @intFromEnum(lhs_path.root) < @intFromEnum(rhs_path.root);
+ return std.mem.order(u8, lhs_path.sub_path, rhs_path.sub_path).compare(.lt);
+ }
+ };
+ zcu.failed_files.sort(@as(FileSortCtx, .{
+ .zcu = zcu,
+ .failed_files_keys = zcu.failed_files.keys(),
+ }));
+
+ for (zcu.failed_files.keys(), zcu.failed_files.values()) |file_index, error_msg| {
+ if (!zcu.alive_files.contains(file_index)) continue;
+ const file = zcu.fileByIndex(file_index);
+ const is_retryable = switch (file.status) {
+ .retryable_failure => true,
+ .success, .astgen_failure => false,
+ .never_loaded => unreachable,
+ };
if (error_msg) |msg| {
- try addModuleErrorMsg(zcu, &bundle, msg.*, false);
+ assert(is_retryable);
+ try addWholeFileError(zcu, &bundle, file_index, msg);
} else {
- // Must be ZIR or Zoir errors. Note that this may include AST errors.
- _ = try file.getTree(gpa); // Tree must be loaded.
+ assert(!is_retryable);
+ // AstGen/ZoirGen succeeded with errors. Note that this may include AST errors.
+ _ = try file.getTree(zcu); // Tree must be loaded.
+ const path = try std.fmt.allocPrint(gpa, "{}", .{file.path.fmt(comp)});
+ defer gpa.free(path);
if (file.zir != null) {
- try addZirErrorMessages(&bundle, file);
+ try bundle.addZirErrorMessages(file.zir.?, file.tree.?, file.source.?, path);
} else if (file.zoir != null) {
- try addZoirErrorMessages(&bundle, file);
+ try bundle.addZoirErrorMessages(file.zoir.?, file.tree.?, file.source.?, path);
} else {
// Either Zir or Zoir must have been loaded.
unreachable;
@@ -3646,20 +3936,16 @@ pub fn addModuleErrorMsg(
const gpa = eb.gpa;
const ip = &zcu.intern_pool;
const err_src_loc = module_err_msg.src_loc.upgrade(zcu);
- const err_source = err_src_loc.file_scope.getSource(gpa) catch |err| {
- const file_path = try err_src_loc.file_scope.fullPath(gpa);
- defer gpa.free(file_path);
+ const err_source = err_src_loc.file_scope.getSource(zcu) catch |err| {
try eb.addRootErrorMessage(.{
- .msg = try eb.printString("unable to load '{s}': {s}", .{
- file_path, @errorName(err),
+ .msg = try eb.printString("unable to load '{}': {s}", .{
+ err_src_loc.file_scope.path.fmt(zcu.comp), @errorName(err),
}),
});
return;
};
- const err_span = try err_src_loc.span(gpa);
+ const err_span = try err_src_loc.span(zcu);
const err_loc = std.zig.findLineColumn(err_source.bytes, err_span.main);
- const file_path = try err_src_loc.file_scope.fullPath(gpa);
- defer gpa.free(file_path);
var ref_traces: std.ArrayListUnmanaged(ErrorBundle.ReferenceTrace) = .empty;
defer ref_traces.deinit(gpa);
@@ -3715,16 +4001,13 @@ pub fn addModuleErrorMsg(
}
const src_loc = try eb.addSourceLocation(.{
- .src_path = try eb.addString(file_path),
+ .src_path = try eb.printString("{}", .{err_src_loc.file_scope.path.fmt(zcu.comp)}),
.span_start = err_span.start,
.span_main = err_span.main,
.span_end = err_span.end,
.line = @intCast(err_loc.line),
.column = @intCast(err_loc.column),
- .source_line = if (err_src_loc.lazy == .entire_file)
- 0
- else
- try eb.addString(err_loc.source_line),
+ .source_line = try eb.addString(err_loc.source_line),
.reference_trace_len = @intCast(ref_traces.items.len),
});
@@ -3740,11 +4023,9 @@ pub fn addModuleErrorMsg(
var last_note_loc: ?std.zig.Loc = null;
for (module_err_msg.notes) |module_note| {
const note_src_loc = module_note.src_loc.upgrade(zcu);
- const source = try note_src_loc.file_scope.getSource(gpa);
- const span = try note_src_loc.span(gpa);
+ const source = try note_src_loc.file_scope.getSource(zcu);
+ const span = try note_src_loc.span(zcu);
const loc = std.zig.findLineColumn(source.bytes, span.main);
- const note_file_path = try note_src_loc.file_scope.fullPath(gpa);
- defer gpa.free(note_file_path);
const omit_source_line = loc.eql(err_loc) or (last_note_loc != null and loc.eql(last_note_loc.?));
last_note_loc = loc;
@@ -3752,7 +4033,7 @@ pub fn addModuleErrorMsg(
const gop = try notes.getOrPutContext(gpa, .{
.msg = try eb.addString(module_note.msg),
.src_loc = try eb.addSourceLocation(.{
- .src_path = try eb.addString(note_file_path),
+ .src_path = try eb.printString("{}", .{note_src_loc.file_scope.path.fmt(zcu.comp)}),
.span_start = span.start,
.span_main = span.main,
.span_end = span.end,
@@ -3791,15 +4072,13 @@ fn addReferenceTraceFrame(
) !void {
const gpa = zcu.gpa;
const src = lazy_src.upgrade(zcu);
- const source = try src.file_scope.getSource(gpa);
- const span = try src.span(gpa);
+ const source = try src.file_scope.getSource(zcu);
+ const span = try src.span(zcu);
const loc = std.zig.findLineColumn(source.bytes, span.main);
- const rt_file_path = try src.file_scope.fullPath(gpa);
- defer gpa.free(rt_file_path);
try ref_traces.append(gpa, .{
.decl_name = try eb.printString("{s}{s}", .{ name, if (inlined) " [inlined]" else "" }),
.src_loc = try eb.addSourceLocation(.{
- .src_path = try eb.addString(rt_file_path),
+ .src_path = try eb.printString("{}", .{src.file_scope.path.fmt(zcu.comp)}),
.span_start = span.start,
.span_main = span.main,
.span_end = span.end,
@@ -3810,18 +4089,30 @@ fn addReferenceTraceFrame(
});
}
-pub fn addZirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void {
- const gpa = eb.gpa;
- const src_path = try file.fullPath(gpa);
- defer gpa.free(src_path);
- return eb.addZirErrorMessages(file.zir.?, file.tree.?, file.source.?, src_path);
-}
+pub fn addWholeFileError(
+ zcu: *Zcu,
+ eb: *ErrorBundle.Wip,
+ file_index: Zcu.File.Index,
+ msg: []const u8,
+) !void {
+ // note: "file imported here" on the import reference token
+ const imported_note: ?ErrorBundle.MessageIndex = switch (zcu.alive_files.get(file_index).?) {
+ .analysis_root => null,
+ .import => |import| try eb.addErrorMessage(.{
+ .msg = try eb.addString("file imported here"),
+ .src_loc = try zcu.fileByIndex(import.importer).errorBundleTokenSrc(import.tok, zcu, eb),
+ }),
+ };
-pub fn addZoirErrorMessages(eb: *ErrorBundle.Wip, file: *Zcu.File) !void {
- const gpa = eb.gpa;
- const src_path = try file.fullPath(gpa);
- defer gpa.free(src_path);
- return eb.addZoirErrorMessages(file.zoir.?, file.tree.?, file.source.?, src_path);
+ try eb.addRootErrorMessage(.{
+ .msg = try eb.addString(msg),
+ .src_loc = try zcu.fileByIndex(file_index).errorBundleWholeFileSrc(zcu, eb),
+ .notes_len = if (imported_note != null) 1 else 0,
+ });
+ if (imported_note) |n| {
+ const note_idx = try eb.reserveNotes(1);
+ eb.extra.items[note_idx] = @intFromEnum(n);
+ }
}
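// Illustrative sketch (not part of this commit): with hypothetical file names, a retryable
// failure reported through this function might render roughly as:
//
//   src/util.zig: error: unable to load 'util.zig': FileNotFound
//   src/main.zig:3:21: note: file imported here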
pub fn performAllTheWork(
@@ -3966,51 +4257,48 @@ fn performAllTheWorkInner(
var astgen_wait_group: WaitGroup = .{};
defer astgen_wait_group.wait();
- // builtin.zig is handled specially for two reasons:
- // 1. to avoid race condition of zig processes truncating each other's builtin.zig files
- // 2. optimization; in the hot path it only incurs a stat() syscall, which happens
- // in the `astgen_wait_group`.
- if (comp.queued_jobs.update_builtin_zig) b: {
- comp.queued_jobs.update_builtin_zig = false;
- if (comp.zcu == null) break :b;
- // TODO put all the modules in a flat array to make them easy to iterate.
- var seen: std.AutoArrayHashMapUnmanaged(*Package.Module, void) = .empty;
- defer seen.deinit(comp.gpa);
- try seen.put(comp.gpa, comp.root_mod, {});
- var i: usize = 0;
- while (i < seen.count()) : (i += 1) {
- const mod = seen.keys()[i];
- for (mod.deps.values()) |dep|
- try seen.put(comp.gpa, dep, {});
-
- const file = mod.builtin_file orelse continue;
-
- comp.thread_pool.spawnWg(&astgen_wait_group, workerUpdateBuiltinZigFile, .{
- comp, mod, file,
+ if (comp.zcu) |zcu| {
+ const gpa = zcu.gpa;
+
+ // We cannot reference `zcu.import_table` after we spawn any `workerUpdateFile` jobs,
+ // because on single-threaded targets the worker will be run eagerly, meaning the
+ // `import_table` could be mutated, and not even holding `comp.mutex` will save us. So,
+ // build up a list of the files to update *before* we spawn any jobs.
+ var astgen_work_items: std.MultiArrayList(struct {
+ file_index: Zcu.File.Index,
+ file: *Zcu.File,
+ }) = .empty;
+ defer astgen_work_items.deinit(gpa);
+ // Not every item in `import_table` will need updating, because some are builtin.zig
+ // files. However, most will, so let's just reserve sufficient capacity upfront.
+ try astgen_work_items.ensureTotalCapacity(gpa, zcu.import_table.count());
+ for (zcu.import_table.keys()) |file_index| {
+ const file = zcu.fileByIndex(file_index);
+ if (file.is_builtin) {
+ // This is a `builtin.zig`, so updating is redundant. However, we want to make
+ // sure the file contents are still correct on disk, since it can improve the
+ // debugging experience better. That job only needs `file`, so we can kick it
+ // off right now.
+ comp.thread_pool.spawnWg(&astgen_wait_group, workerUpdateBuiltinFile, .{ comp, file });
+ continue;
+ }
+ astgen_work_items.appendAssumeCapacity(.{
+ .file_index = file_index,
+ .file = file,
});
}
- }
- if (comp.zcu) |zcu| {
- {
- // Worker threads may append to zcu.files and zcu.import_table
- // so we must hold the lock while spawning those tasks, since
- // we access those tables in this loop.
- comp.mutex.lock();
- defer comp.mutex.unlock();
-
- while (comp.astgen_work_queue.readItem()) |file_index| {
- // Pre-load these things from our single-threaded context since they
- // will be needed by the worker threads.
- const path_digest = zcu.filePathDigest(file_index);
- const file = zcu.fileByIndex(file_index);
- comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateFile, .{
- comp, file, file_index, path_digest, zir_prog_node, &astgen_wait_group, .root,
- });
- }
+ // Now that we're not going to touch `zcu.import_table` again, we can spawn `workerUpdateFile` jobs.
+ for (astgen_work_items.items(.file_index), astgen_work_items.items(.file)) |file_index, file| {
+ comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateFile, .{
+ comp, file, file_index, zir_prog_node, &astgen_wait_group,
+ });
}
- for (0.., zcu.embed_table.values()) |ef_index_usize, ef| {
+ // On the other hand, it's fine to directly iterate `zcu.embed_table.keys()` here
+        // because `workerUpdateEmbedFile` can't invalidate it. The difference here is that one
+ // `@embedFile` can't trigger analysis of a new `@embedFile`!
+ for (0.., zcu.embed_table.keys()) |ef_index_usize, ef| {
const ef_index: Zcu.EmbedFile.Index = @enumFromInt(ef_index_usize);
comp.thread_pool.spawnWgId(&astgen_wait_group, workerUpdateEmbedFile, .{
comp, ef_index, ef,
@@ -4035,25 +4323,39 @@ fn performAllTheWorkInner(
const pt: Zcu.PerThread = .activate(zcu, .main);
defer pt.deactivate();
- // If the cache mode is `whole`, then add every source file to the cache manifest.
+ const gpa = zcu.gpa;
+
+ // On an incremental update, a source file might become "dead", in that all imports of
+ // the file were removed. This could even change what module the file belongs to! As such,
+ // we do a traversal over the files, to figure out which ones are alive and the modules
+ // they belong to.
+ const any_fatal_files = try pt.computeAliveFiles();
+
+ // If the cache mode is `whole`, add every alive source file to the manifest.
switch (comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |man| {
- const gpa = zcu.gpa;
- for (zcu.import_table.values()) |file_index| {
+ for (zcu.alive_files.keys()) |file_index| {
const file = zcu.fileByIndex(file_index);
- const source = file.getSource(gpa) catch |err| {
- try pt.reportRetryableFileError(file_index, "unable to load source: {s}", .{@errorName(err)});
- continue;
+
+ switch (file.status) {
+ .never_loaded => unreachable, // AstGen tried to load it
+ .retryable_failure => continue, // the file cannot be read; this is a guaranteed error
+ .astgen_failure, .success => {}, // the file was read successfully
+ }
+
+ const path = try file.path.toAbsolute(comp.dirs, gpa);
+ defer gpa.free(path);
+
+ const result = res: {
+ whole.cache_manifest_mutex.lock();
+ defer whole.cache_manifest_mutex.unlock();
+ if (file.source) |source| {
+ break :res man.addFilePostContents(path, source, file.stat);
+ } else {
+ break :res man.addFilePost(path);
+ }
};
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- file.mod.root.root_dir.path orelse ".",
- file.mod.root.sub_path,
- file.sub_file_path,
- });
- errdefer gpa.free(resolved_path);
- whole.cache_manifest_mutex.lock();
- defer whole.cache_manifest_mutex.unlock();
- man.addFilePostContents(resolved_path, source.bytes, source.stat) catch |err| switch (err) {
+ result catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => {
try pt.reportRetryableFileError(file_index, "unable to update cache: {s}", .{@errorName(err)});
@@ -4065,23 +4367,14 @@ fn performAllTheWorkInner(
.incremental => {},
}
- try reportMultiModuleErrors(pt);
-
- const any_fatal_files = for (zcu.import_table.values()) |file_index| {
- const file = zcu.fileByIndex(file_index);
- switch (file.status) {
- .never_loaded => unreachable, // everything is loaded by the workers
- .retryable_failure, .astgen_failure => break true,
- .success => {},
- }
- } else false;
-
- if (any_fatal_files or comp.alloc_failure_occurred) {
+ if (any_fatal_files or
+ zcu.multi_module_err != null or
+ zcu.failed_imports.items.len > 0 or
+ comp.alloc_failure_occurred)
+ {
// We give up right now! No updating of ZIR refs, no nothing. The idea is that this prevents
// us from invalidating lots of incremental dependencies due to files with e.g. parse errors.
// However, this means our analysis data is invalid, so we want to omit all analysis errors.
-
- assert(zcu.failed_files.count() > 0); // we will get an error
zcu.skip_analysis_this_update = true;
return;
}
@@ -4093,6 +4386,11 @@ fn performAllTheWorkInner(
}
try zcu.flushRetryableFailures();
+ // It's analysis time! Queue up our initial analysis.
+ for (zcu.analysis_roots.slice()) |mod| {
+ try comp.queueJob(.{ .analyze_mod = mod });
+ }
+
zcu.sema_prog_node = main_progress_node.start("Semantic Analysis", 0);
zcu.codegen_prog_node = if (comp.bin_file != null) main_progress_node.start("Code Generation", 0) else .none;
}
@@ -4236,7 +4534,7 @@ fn processOneJob(tid: usize, comp: *Compilation, job: Job) JobError!void {
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
- pt.semaPkg(mod) catch |err| switch (err) {
+ pt.semaMod(mod) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
@@ -4301,7 +4599,7 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
for (&[_][]const u8{ "docs/main.js", "docs/index.html" }) |sub_path| {
const basename = std.fs.path.basename(sub_path);
- comp.zig_lib_directory.handle.copyFile(sub_path, out_dir, basename, .{}) catch |err| {
+ comp.dirs.zig_lib.handle.copyFile(sub_path, out_dir, basename, .{}) catch |err| {
comp.lockAndSetMiscFailure(.docs_copy, "unable to copy {s}: {s}", .{
sub_path,
@errorName(err),
@@ -4338,10 +4636,12 @@ fn docsCopyFallible(comp: *Compilation) anyerror!void {
fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8, tar_file: std.fs.File) !void {
const root = module.root;
- const sub_path = if (root.sub_path.len == 0) "." else root.sub_path;
- var mod_dir = root.root_dir.handle.openDir(sub_path, .{ .iterate = true }) catch |err| {
+ var mod_dir = d: {
+ const root_dir, const sub_path = root.openInfo(comp.dirs);
+ break :d root_dir.openDir(sub_path, .{ .iterate = true });
+ } catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open directory '{}': {s}", .{
- root, @errorName(err),
+ root.fmt(comp), @errorName(err),
});
};
defer mod_dir.close();
@@ -4363,13 +4663,13 @@ fn docsCopyModule(comp: *Compilation, module: *Package.Module, name: []const u8,
}
var file = mod_dir.openFile(entry.path, .{}) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to open '{}{s}': {s}", .{
- root, entry.path, @errorName(err),
+ root.fmt(comp), entry.path, @errorName(err),
});
};
defer file.close();
archiver.writeFile(entry.path, file) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to archive '{}{s}': {s}", .{
- root, entry.path, @errorName(err),
+ root.fmt(comp), entry.path, @errorName(err),
});
};
}
@@ -4430,13 +4730,11 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
const src_basename = "main.zig";
const root_name = std.fs.path.stem(src_basename);
+ const dirs = comp.dirs.withoutLocalCache();
+
const root_mod = try Package.Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = comp.zig_lib_directory,
- .sub_path = "docs/wasm",
- },
+ .root = try .fromRoot(arena, dirs, .zig_lib, "docs/wasm"),
.root_src_path = src_basename,
},
.fully_qualified_name = root_name,
@@ -4447,16 +4745,10 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null,
});
const walk_mod = try Package.Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = comp.zig_lib_directory,
- .sub_path = "docs/wasm",
- },
+ .root = try .fromRoot(arena, dirs, .zig_lib, "docs/wasm"),
.root_src_path = "Walk.zig",
},
.fully_qualified_name = "Walk",
@@ -4467,8 +4759,6 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
.global = config,
.cc_argv = &.{},
.parent = root_mod,
- .builtin_mod = root_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is set
});
try root_mod.deps.put(arena, "Walk", walk_mod);
const bin_basename = try std.zig.binNameAlloc(arena, .{
@@ -4478,9 +4768,7 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
});
const sub_compilation = try Compilation.create(gpa, arena, .{
- .global_cache_directory = comp.global_cache_directory,
- .local_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = dirs,
.self_exe_path = comp.self_exe_path,
.config = config,
.root_mod = root_mod,
@@ -4517,14 +4805,14 @@ fn workerDocsWasmFallible(comp: *Compilation, prog_node: std.Progress.Node) anye
};
defer out_dir.close();
- sub_compilation.local_cache_directory.handle.copyFile(
+ sub_compilation.dirs.local_cache.handle.copyFile(
sub_compilation.cache_use.whole.bin_sub_path.?,
out_dir,
"main.wasm",
.{},
) catch |err| {
return comp.lockAndSetMiscFailure(.docs_copy, "unable to copy '{}{s}' to '{}{s}': {s}", .{
- sub_compilation.local_cache_directory,
+ sub_compilation.dirs.local_cache,
sub_compilation.cache_use.whole.bin_sub_path.?,
emit.root_dir,
emit.sub_path,
@@ -4538,28 +4826,23 @@ fn workerUpdateFile(
comp: *Compilation,
file: *Zcu.File,
file_index: Zcu.File.Index,
- path_digest: Cache.BinDigest,
prog_node: std.Progress.Node,
wg: *WaitGroup,
- src: Zcu.AstGenSrc,
) void {
- const child_prog_node = prog_node.start(file.sub_file_path, 0);
+ const child_prog_node = prog_node.start(std.fs.path.basename(file.path.sub_path), 0);
defer child_prog_node.end();
const pt: Zcu.PerThread = .activate(comp.zcu.?, @enumFromInt(tid));
defer pt.deactivate();
- pt.updateFile(file, path_digest) catch |err| switch (err) {
- error.AnalysisFail => return,
- else => {
- pt.reportRetryableAstGenError(src, file_index, err) catch |oom| switch (oom) {
- error.OutOfMemory => {
- comp.mutex.lock();
- defer comp.mutex.unlock();
- comp.setAllocFailure();
- },
- };
- return;
- },
+ pt.updateFile(file_index, file) catch |err| {
+ pt.reportRetryableFileError(file_index, "unable to load '{s}': {s}", .{ std.fs.path.basename(file.path.sub_path), @errorName(err) }) catch |oom| switch (oom) {
+ error.OutOfMemory => {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.setAllocFailure();
+ },
+ };
+ return;
};
switch (file.getMode()) {
@@ -4567,9 +4850,9 @@ fn workerUpdateFile(
.zon => return, // ZON can't import anything so we're done
}
- // Pre-emptively look for `@import` paths and queue them up.
- // If we experience an error preemptively fetching the
- // file, just ignore it and let it happen again later during Sema.
+    // Discover all imports in the file. We ignore module imports for now, since we don't
+    // know which module we're in, but file path imports might require queuing up further
+    // AstGen jobs.
const imports_index = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
if (imports_index != 0) {
const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_index);
@@ -4581,54 +4864,34 @@ fn workerUpdateFile(
extra_index = item.end;
const import_path = file.zir.?.nullTerminatedString(item.data.name);
- // `@import("builtin")` is handled specially.
- if (mem.eql(u8, import_path, "builtin")) continue;
-
- const import_result, const imported_path_digest = blk: {
- comp.mutex.lock();
- defer comp.mutex.unlock();
- const res = pt.importFile(file, import_path) catch continue;
- if (!res.is_pkg) {
- res.file.addReference(pt.zcu, .{ .import = .{
- .file = file_index,
- .token = item.data.token,
- } }) catch continue;
- }
- if (res.is_new) if (comp.file_system_inputs) |fsi| {
- comp.appendFileSystemInput(fsi, res.file.mod.root, res.file.sub_file_path) catch continue;
- };
- const imported_path_digest = pt.zcu.filePathDigest(res.file_index);
- break :blk .{ res, imported_path_digest };
- };
- if (import_result.is_new) {
- log.debug("AstGen of {s} has import '{s}'; queuing AstGen of {s}", .{
- file.sub_file_path, import_path, import_result.file.sub_file_path,
- });
- const sub_src: Zcu.AstGenSrc = .{ .import = .{
- .importing_file = file_index,
- .import_tok = item.data.token,
- } };
- comp.thread_pool.spawnWgId(wg, workerUpdateFile, .{
- comp, import_result.file, import_result.file_index, imported_path_digest, prog_node, wg, sub_src,
- });
+ if (pt.discoverImport(file.path, import_path)) |res| switch (res) {
+ .module, .existing_file => {},
+ .new_file => |new| {
+ comp.thread_pool.spawnWgId(wg, workerUpdateFile, .{
+ comp, new.file, new.index, prog_node, wg,
+ });
+ },
+ } else |err| switch (err) {
+ error.OutOfMemory => {
+ comp.mutex.lock();
+ defer comp.mutex.unlock();
+ comp.setAllocFailure();
+ },
}
}
}
}
-fn workerUpdateBuiltinZigFile(
- comp: *Compilation,
- mod: *Package.Module,
- file: *Zcu.File,
-) void {
- Builtin.populateFile(comp, mod, file) catch |err| {
+fn workerUpdateBuiltinFile(comp: *Compilation, file: *Zcu.File) void {
+ Builtin.updateFileOnDisk(file, comp) catch |err| {
comp.mutex.lock();
defer comp.mutex.unlock();
-
- comp.setMiscFailure(.write_builtin_zig, "unable to write '{}{s}': {s}", .{
- mod.root, mod.root_src_path, @errorName(err),
- });
+ comp.setMiscFailure(
+ .write_builtin_zig,
+ "unable to write '{}': {s}",
+ .{ file.path.fmt(comp), @errorName(err) },
+ );
};
}
@@ -4738,10 +5001,10 @@ pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module
const tmp_digest = man.hash.peek();
const tmp_dir_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
- var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer zig_cache_tmp_dir.close();
const cimport_basename = "cimport.h";
- const out_h_path = try comp.local_cache_directory.join(arena, &[_][]const u8{
+ const out_h_path = try comp.dirs.local_cache.join(arena, &[_][]const u8{
tmp_dir_sub_path, cimport_basename,
});
const out_dep_path = try std.fmt.allocPrint(arena, "{s}.d", .{out_h_path});
@@ -4779,7 +5042,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module
new_argv[i] = try arena.dupeZ(u8, arg);
}
- const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"});
+ const c_headers_dir_path_z = try comp.dirs.zig_lib.joinZ(arena, &.{"include"});
var errors = std.zig.ErrorBundle.empty;
errdefer errors.deinit(comp.gpa);
break :tree translate_c.translate(
@@ -4820,7 +5083,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module
const bin_digest = man.finalBin();
const hex_digest = Cache.binToHex(bin_digest);
const o_sub_path = "o" ++ std.fs.path.sep_str ++ hex_digest;
- var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
+ var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
var out_zig_file = try o_dir.createFile(cimport_zig_basename, .{});
@@ -5226,7 +5489,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// We can't know the digest until we do the C compiler invocation,
// so we need a temporary filename.
const out_obj_path = try comp.tmpFilePath(arena, o_basename);
- var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
const out_diag_path = if (comp.clang_passthrough_mode or !ext.clangSupportsDiagnostics())
@@ -5362,7 +5625,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
// Rename into place.
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
+ var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
const tmp_basename = std.fs.path.basename(out_obj_path);
try std.fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, o_basename);
@@ -5386,7 +5649,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: std.Pr
c_object.status = .{
.success = .{
.object_path = .{
- .root_dir = comp.local_cache_directory,
+ .root_dir = comp.dirs.local_cache,
.sub_path = try std.fs.path.join(gpa, &.{ "o", &digest, o_basename }),
},
.lock = man.toOwnedLock(),
@@ -5449,13 +5712,13 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &.{ "o", &digest });
- var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
+ var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
- const in_rc_path = try comp.local_cache_directory.join(comp.gpa, &.{
+ const in_rc_path = try comp.dirs.local_cache.join(comp.gpa, &.{
o_sub_path, rc_basename,
});
- const out_res_path = try comp.local_cache_directory.join(comp.gpa, &.{
+ const out_res_path = try comp.dirs.local_cache.join(comp.gpa, &.{
o_sub_path, res_basename,
});
@@ -5517,7 +5780,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
win32_resource.status = .{
.success = .{
- .res_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
+ .res_path = try comp.dirs.local_cache.join(comp.gpa, &[_][]const u8{
"o", &digest, res_basename,
}),
.lock = man.toOwnedLock(),
@@ -5535,7 +5798,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
const rc_basename_noext = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];
const digest = if (try man.hit()) man.final() else blk: {
- var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
const res_filename = try std.fmt.allocPrint(arena, "{s}.res", .{rc_basename_noext});
@@ -5605,7 +5868,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
// Rename into place.
const digest = man.final();
const o_sub_path = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
+ var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
const tmp_basename = std.fs.path.basename(out_res_path);
try std.fs.rename(zig_cache_tmp_dir, tmp_basename, o_dir, res_filename);
@@ -5626,7 +5889,7 @@ fn updateWin32Resource(comp: *Compilation, win32_resource: *Win32Resource, win32
win32_resource.status = .{
.success = .{
- .res_path = try comp.local_cache_directory.join(comp.gpa, &[_][]const u8{
+ .res_path = try comp.dirs.local_cache.join(comp.gpa, &[_][]const u8{
"o", &digest, res_basename,
}),
.lock = man.toOwnedLock(),
@@ -5721,7 +5984,7 @@ fn spawnZigRc(
pub fn tmpFilePath(comp: Compilation, ally: Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
const s = std.fs.path.sep_str;
const rand_int = std.crypto.random.int(u64);
- if (comp.local_cache_directory.path) |p| {
+ if (comp.dirs.local_cache.path) |p| {
return std.fmt.allocPrint(ally, "{s}" ++ s ++ "tmp" ++ s ++ "{x}-{s}", .{ p, rand_int, suffix });
} else {
return std.fmt.allocPrint(ally, "tmp" ++ s ++ "{x}-{s}", .{ rand_int, suffix });
@@ -5962,12 +6225,12 @@ pub fn addCCArgs(
if (comp.config.link_libcpp) {
try argv.append("-isystem");
try argv.append(try std.fs.path.join(arena, &[_][]const u8{
- comp.zig_lib_directory.path.?, "libcxx", "include",
+ comp.dirs.zig_lib.path.?, "libcxx", "include",
}));
try argv.append("-isystem");
try argv.append(try std.fs.path.join(arena, &[_][]const u8{
- comp.zig_lib_directory.path.?, "libcxxabi", "include",
+ comp.dirs.zig_lib.path.?, "libcxxabi", "include",
}));
try libcxx.addCxxArgs(comp, arena, argv);
@@ -5977,7 +6240,7 @@ pub fn addCCArgs(
// However as noted by @dimenus, appending libc headers before compiler headers breaks
// intrinsics and other compiler specific items.
try argv.append("-isystem");
- try argv.append(try std.fs.path.join(arena, &[_][]const u8{ comp.zig_lib_directory.path.?, "include" }));
+ try argv.append(try std.fs.path.join(arena, &.{ comp.dirs.zig_lib.path.?, "include" }));
try argv.ensureUnusedCapacity(comp.libc_include_dir_list.len * 2);
for (comp.libc_include_dir_list) |include_dir| {
@@ -5996,7 +6259,7 @@ pub fn addCCArgs(
if (comp.config.link_libunwind) {
try argv.append("-isystem");
try argv.append(try std.fs.path.join(arena, &[_][]const u8{
- comp.zig_lib_directory.path.?, "libunwind", "include",
+ comp.dirs.zig_lib.path.?, "libunwind", "include",
}));
}
@@ -6584,12 +6847,12 @@ test "classifyFileExt" {
try std.testing.expectEqual(FileExt.zig, classifyFileExt("foo.zig"));
}
-fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) !Path {
+fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) !Cache.Path {
return (try crtFilePath(&comp.crt_files, basename)) orelse {
const lci = comp.libc_installation orelse return error.LibCInstallationNotAvailable;
const crt_dir_path = lci.crt_dir orelse return error.LibCInstallationMissingCrtDir;
const full_path = try std.fs.path.join(arena, &[_][]const u8{ crt_dir_path, basename });
- return Path.initCwd(full_path);
+ return Cache.Path.initCwd(full_path);
};
}
@@ -6598,7 +6861,7 @@ pub fn crtFileAsString(comp: *Compilation, arena: Allocator, basename: []const u
return path.toString(arena);
}
-fn crtFilePath(crt_files: *std.StringHashMapUnmanaged(CrtFile), basename: []const u8) Allocator.Error!?Path {
+fn crtFilePath(crt_files: *std.StringHashMapUnmanaged(CrtFile), basename: []const u8) Allocator.Error!?Cache.Path {
const crt_file = crt_files.get(basename) orelse return null;
return crt_file.full_object_path;
}
@@ -6736,9 +6999,8 @@ fn buildOutputFromZig(
});
const root_mod = try Package.Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = src_basename,
},
.fully_qualified_name = "root",
@@ -6760,8 +7022,6 @@ fn buildOutputFromZig(
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
});
const target = comp.getTarget();
const bin_basename = try std.zig.binNameAlloc(arena, .{
@@ -6785,9 +7045,7 @@ fn buildOutputFromZig(
};
const sub_compilation = try Compilation.create(gpa, arena, .{
- .global_cache_directory = comp.global_cache_directory,
- .local_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.cache_mode = .whole,
.parent_whole_cache = parent_whole_cache,
.self_exe_path = comp.self_exe_path,
@@ -6878,9 +7136,8 @@ pub fn build_crt_file(
},
});
const root_mod = try Package.Module.create(arena, .{
- .global_cache_directory = comp.global_cache_directory,
.paths = .{
- .root = .{ .root_dir = comp.zig_lib_directory },
+ .root = .zig_lib_root,
.root_src_path = "",
},
.fully_qualified_name = "root",
@@ -6908,8 +7165,6 @@ pub fn build_crt_file(
.global = config,
.cc_argv = &.{},
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // there is only one module in this compilation
});
for (c_source_files) |*item| {
@@ -6917,9 +7172,7 @@ pub fn build_crt_file(
}
const sub_compilation = try Compilation.create(gpa, arena, .{
- .local_cache_directory = comp.global_cache_directory,
- .global_cache_directory = comp.global_cache_directory,
- .zig_lib_directory = comp.zig_lib_directory,
+ .dirs = comp.dirs.withoutLocalCache(),
.self_exe_path = comp.self_exe_path,
.cache_mode = .whole,
.config = config,
@@ -6962,7 +7215,7 @@ pub fn build_crt_file(
}
}
-pub fn queueLinkTaskMode(comp: *Compilation, path: Path, output_mode: std.builtin.OutputMode) void {
+pub fn queueLinkTaskMode(comp: *Compilation, path: Cache.Path, output_mode: std.builtin.OutputMode) void {
comp.queueLinkTasks(switch (output_mode) {
.Exe => unreachable,
.Obj => &.{.{ .load_object = path }},
@@ -6983,7 +7236,7 @@ pub fn queueLinkTasks(comp: *Compilation, tasks: []const link.Task) void {
pub fn toCrtFile(comp: *Compilation) Allocator.Error!CrtFile {
return .{
.full_object_path = .{
- .root_dir = comp.local_cache_directory,
+ .root_dir = comp.dirs.local_cache,
.sub_path = try comp.gpa.dupe(u8, comp.cache_use.whole.bin_sub_path.?),
},
.lock = comp.cache_use.whole.moveLock(),
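// Illustrative sketch (not part of this commit): the shape of `Compilation.Directories`
// implied by its uses in the hunks above, namely the fields `cwd`, `zig_lib`, `global_cache`,
// and `local_cache`, plus `withoutLocalCache` for sub-compilations that should share only the
// global cache. Field types and anything not referenced above are assumptions.
pub const Directories = struct {
    cwd: []const u8,
    zig_lib: Cache.Directory,
    global_cache: Cache.Directory,
    local_cache: Cache.Directory,

    pub fn withoutLocalCache(dirs: Directories) Directories {
        var result = dirs;
        result.local_cache = dirs.global_cache;
        return result;
    }
};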
src/crash_report.zig
@@ -86,15 +86,12 @@ fn dumpStatusReport() !void {
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
- try stderr.writeAll("Analyzing lost instruction in file '");
- try writeFilePath(file, stderr);
- try stderr.writeAll("'. This should not happen!\n\n");
+ try stderr.print("Analyzing lost instruction in file '{}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
return;
};
-    try stderr.writeAll("Analyzing ");
-    try writeFilePath(file, stderr);
-    try stderr.writeAll("\n");
+    try stderr.print("Analyzing '{}'\n", .{file.path.fmt(zcu.comp)});
print_zir.renderInstructionContext(
allocator,
@@ -108,23 +105,24 @@ fn dumpStatusReport() !void {
error.OutOfMemory => try stderr.writeAll(" <out of memory dumping zir>\n"),
else => |e| return e,
};
- try stderr.writeAll(" For full context, use the command\n zig ast-check -t ");
- try writeFilePath(file, stderr);
- try stderr.writeAll("\n\n");
+ try stderr.print(
+ \\ For full context, use the command
+ \\ zig ast-check -t {}
+ \\
+ \\
+ , .{file.path.fmt(zcu.comp)});
var parent = anal.parent;
while (parent) |curr| {
fba.reset();
- try stderr.writeAll(" in ");
- const cur_block_file, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
- const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
- try writeFilePath(cur_block_file, stderr);
- try stderr.writeAll("\n > [lost instruction; this should not happen]\n");
+ const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
+ try stderr.print(" in {}\n", .{cur_block_file.path.fmt(zcu.comp)});
+ _, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
+ try stderr.writeAll(" > [lost instruction; this should not happen]\n");
parent = curr.parent;
continue;
};
- try writeFilePath(cur_block_file, stderr);
- try stderr.writeAll("\n > ");
+ try stderr.writeAll(" > ");
print_zir.renderSingleInstruction(
allocator,
curr.body[curr.body_index],
@@ -146,18 +144,6 @@ fn dumpStatusReport() !void {
var crash_heap: [16 * 4096]u8 = undefined;
-fn writeFilePath(file: *Zcu.File, writer: anytype) !void {
- if (file.mod.root.root_dir.path) |path| {
- try writer.writeAll(path);
- try writer.writeAll(std.fs.path.sep_str);
- }
- if (file.mod.root.sub_path.len > 0) {
- try writer.writeAll(file.mod.root.sub_path);
- try writer.writeAll(std.fs.path.sep_str);
- }
- try writer.writeAll(file.sub_file_path);
-}
-
pub fn compilerPanic(msg: []const u8, maybe_ret_addr: ?usize) noreturn {
@branchHint(.cold);
PanicSwitch.preDispatch();
src/InternPool.zig
@@ -1723,6 +1723,19 @@ pub const FileIndex = enum(u32) {
.index = @intFromEnum(file_index) & ip.getIndexMask(u32),
};
}
+ pub fn toOptional(i: FileIndex) Optional {
+ return @enumFromInt(@intFromEnum(i));
+ }
+ pub const Optional = enum(u32) {
+ none = std.math.maxInt(u32),
+ _,
+ pub fn unwrap(opt: Optional) ?FileIndex {
+ return switch (opt) {
+ .none => null,
+ _ => @enumFromInt(@intFromEnum(opt)),
+ };
+ }
+ };
};
const File = struct {
src/introspect.zig
@@ -1,15 +1,18 @@
const std = @import("std");
const builtin = @import("builtin");
const mem = std.mem;
+const Allocator = mem.Allocator;
const os = std.os;
const fs = std.fs;
+const Cache = std.Build.Cache;
const Compilation = @import("Compilation.zig");
+const Package = @import("Package.zig");
const build_options = @import("build_options");
/// Returns the sub_path that worked, or `null` if none did.
/// The path of the returned Directory is relative to `base`.
/// The handle of the returned Directory is open.
-fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
+fn testZigInstallPrefix(base_dir: fs.Dir) ?Cache.Directory {
const test_index_file = "std" ++ fs.path.sep_str ++ "std.zig";
zig_dir: {
@@ -21,7 +24,7 @@ fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
break :zig_dir;
};
file.close();
- return Compilation.Directory{ .handle = test_zig_dir, .path = lib_zig };
+ return .{ .handle = test_zig_dir, .path = lib_zig };
}
// Try lib/std/std.zig
@@ -31,37 +34,50 @@ fn testZigInstallPrefix(base_dir: fs.Dir) ?Compilation.Directory {
return null;
};
file.close();
- return Compilation.Directory{ .handle = test_zig_dir, .path = "lib" };
-}
-
-/// This is a small wrapper around selfExePathAlloc that adds support for WASI
-/// based on a hard-coded Preopen directory ("/zig")
-pub fn findZigExePath(allocator: mem.Allocator) ![]u8 {
- if (builtin.os.tag == .wasi) {
- @compileError("this function is unsupported on WASI");
- }
-
- return fs.selfExePathAlloc(allocator);
+ return .{ .handle = test_zig_dir, .path = "lib" };
}
/// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDir(gpa: mem.Allocator) !Compilation.Directory {
- const self_exe_path = try findZigExePath(gpa);
+pub fn findZigLibDir(gpa: Allocator) !Cache.Directory {
+ const cwd_path = try getResolvedCwd(gpa);
+ defer gpa.free(cwd_path);
+ const self_exe_path = try fs.selfExePathAlloc(gpa);
defer gpa.free(self_exe_path);
- return findZigLibDirFromSelfExe(gpa, self_exe_path);
+ return findZigLibDirFromSelfExe(gpa, cwd_path, self_exe_path);
}
-/// Both the directory handle and the path are newly allocated resources which the caller now owns.
-pub fn findZigLibDirFromSelfExe(
- allocator: mem.Allocator,
- self_exe_path: []const u8,
-) error{
+/// Like `std.process.getCwdAlloc`, but also resolves the path with `std.fs.path.resolve`. This
+/// means the path has no repeated separators, no "." or ".." components, and no trailing separator.
+/// On WASI, "" is returned instead of ".".
+pub fn getResolvedCwd(gpa: Allocator) error{
OutOfMemory,
- FileNotFound,
CurrentWorkingDirectoryUnlinked,
Unexpected,
-}!Compilation.Directory {
+}![]u8 {
+ if (builtin.target.os.tag == .wasi) {
+ if (std.debug.runtime_safety) {
+ const cwd = try std.process.getCwdAlloc(gpa);
+ defer gpa.free(cwd);
+ std.debug.assert(mem.eql(u8, cwd, "."));
+ }
+ return "";
+ }
+ const cwd = try std.process.getCwdAlloc(gpa);
+ defer gpa.free(cwd);
+ const resolved = try fs.path.resolve(gpa, &.{cwd});
+ std.debug.assert(fs.path.isAbsolute(resolved));
+ return resolved;
+}
+
+/// Both the directory handle and the path are newly allocated resources which the caller now owns.
+pub fn findZigLibDirFromSelfExe(
+ allocator: Allocator,
+ /// The return value of `getResolvedCwd`.
+ /// Passed as an argument to avoid pointlessly repeating the call.
+ cwd_path: []const u8,
+ self_exe_path: []const u8,
+) error{ OutOfMemory, FileNotFound }!Cache.Directory {
const cwd = fs.cwd();
var cur_path: []const u8 = self_exe_path;
while (fs.path.dirname(cur_path)) |dirname| : (cur_path = dirname) {
@@ -69,18 +85,20 @@ pub fn findZigLibDirFromSelfExe(
defer base_dir.close();
const sub_directory = testZigInstallPrefix(base_dir) orelse continue;
- const p = try fs.path.join(allocator, &[_][]const u8{ dirname, sub_directory.path.? });
+ const p = try fs.path.join(allocator, &.{ dirname, sub_directory.path.? });
defer allocator.free(p);
- return Compilation.Directory{
+
+ const resolved = try resolvePath(allocator, cwd_path, &.{p});
+ return .{
.handle = sub_directory.handle,
- .path = try resolvePath(allocator, p),
+ .path = if (resolved.len == 0) null else resolved,
};
}
return error.FileNotFound;
}
/// Caller owns returned memory.
-pub fn resolveGlobalCacheDir(allocator: mem.Allocator) ![]u8 {
+pub fn resolveGlobalCacheDir(allocator: Allocator) ![]u8 {
if (builtin.os.tag == .wasi)
@compileError("on WASI the global cache dir must be resolved with preopens");
@@ -91,56 +109,107 @@ pub fn resolveGlobalCacheDir(allocator: mem.Allocator) ![]u8 {
if (builtin.os.tag != .windows) {
if (std.zig.EnvVar.XDG_CACHE_HOME.getPosix()) |cache_root| {
if (cache_root.len > 0) {
- return fs.path.join(allocator, &[_][]const u8{ cache_root, appname });
+ return fs.path.join(allocator, &.{ cache_root, appname });
}
}
if (std.zig.EnvVar.HOME.getPosix()) |home| {
- return fs.path.join(allocator, &[_][]const u8{ home, ".cache", appname });
+ return fs.path.join(allocator, &.{ home, ".cache", appname });
}
}
return fs.getAppDataDir(allocator, appname);
}
-/// Similar to std.fs.path.resolve, with a few important differences:
-/// * If the input is an absolute path, check it against the cwd and try to
-/// convert it to a relative path.
-/// * If the resulting path would start with a relative up-dir ("../"), instead
-/// return an absolute path based on the cwd.
-/// * When targeting WASI, fail with an error message if an absolute path is
-/// used.
+/// Similar to `fs.path.resolve`, but converts to a cwd-relative path, or, if that would
+/// start with a relative up-dir (".."), an absolute path based on the cwd. Also, the cwd
+/// itself is returned as the empty string ("") rather than ".".
pub fn resolvePath(
- ally: mem.Allocator,
- p: []const u8,
-) error{
- OutOfMemory,
- CurrentWorkingDirectoryUnlinked,
- Unexpected,
-}![]u8 {
- if (fs.path.isAbsolute(p)) {
- const cwd_path = try std.process.getCwdAlloc(ally);
- defer ally.free(cwd_path);
- const relative = try fs.path.relative(ally, cwd_path, p);
- if (isUpDir(relative)) {
- ally.free(relative);
- return ally.dupe(u8, p);
- } else {
- return relative;
+ gpa: Allocator,
+ /// The return value of `getResolvedCwd`.
+ /// Passed as an argument to avoid pointlessly repeating the call.
+ cwd_resolved: []const u8,
+ paths: []const []const u8,
+) Allocator.Error![]u8 {
+ if (builtin.target.os.tag == .wasi) {
+ std.debug.assert(mem.eql(u8, cwd_resolved, ""));
+ const res = try fs.path.resolve(gpa, paths);
+ if (mem.eql(u8, res, ".")) {
+ gpa.free(res);
+ return "";
}
+ return res;
+ }
+
+ // Heuristic for a fast path: if no component is absolute and ".." never appears, we just need to resolve `paths`.
+ for (paths) |p| {
+ if (fs.path.isAbsolute(p)) break; // absolute path
+ if (mem.indexOf(u8, p, "..") != null) break; // may contain up-dir
} else {
- const resolved = try fs.path.resolve(ally, &.{p});
- if (isUpDir(resolved)) {
- ally.free(resolved);
- const cwd_path = try std.process.getCwdAlloc(ally);
- defer ally.free(cwd_path);
- return fs.path.resolve(ally, &.{ cwd_path, p });
- } else {
- return resolved;
+ // no absolute path, no "..".
+ const res = try fs.path.resolve(gpa, paths);
+ if (mem.eql(u8, res, ".")) {
+ gpa.free(res);
+ return "";
}
+ std.debug.assert(!fs.path.isAbsolute(res));
+ std.debug.assert(!isUpDir(res));
+ return res;
+ }
+
+ // The fast path failed; resolve the whole thing.
+ // Optimization: `paths` often has just one element.
+ const path_resolved = switch (paths.len) {
+ 0 => unreachable,
+ 1 => try fs.path.resolve(gpa, &.{ cwd_resolved, paths[0] }),
+ else => r: {
+ const all_paths = try gpa.alloc([]const u8, paths.len + 1);
+ defer gpa.free(all_paths);
+ all_paths[0] = cwd_resolved;
+ @memcpy(all_paths[1..], paths);
+ break :r try fs.path.resolve(gpa, all_paths);
+ },
+ };
+ errdefer gpa.free(path_resolved);
+
+ std.debug.assert(fs.path.isAbsolute(path_resolved));
+ std.debug.assert(fs.path.isAbsolute(cwd_resolved));
+
+ if (!std.mem.startsWith(u8, path_resolved, cwd_resolved)) return path_resolved; // not in cwd
+ if (path_resolved.len == cwd_resolved.len) {
+ // equal to cwd
+ gpa.free(path_resolved);
+ return "";
}
+ if (path_resolved[cwd_resolved.len] != std.fs.path.sep) return path_resolved; // not in cwd (last component differs)
+
+ // in cwd; extract sub path
+ const sub_path = try gpa.dupe(u8, path_resolved[cwd_resolved.len + 1 ..]);
+ gpa.free(path_resolved);
+ return sub_path;
}
/// TODO move this to std.fs.path
pub fn isUpDir(p: []const u8) bool {
return mem.startsWith(u8, p, "..") and (p.len == 2 or p[2] == fs.path.sep);
}
+
+pub const default_local_zig_cache_basename = ".zig-cache";
+
+/// Searches upwards from `cwd` for a directory containing a `build.zig` file.
+/// If such a directory is found, returns the path to it joined with the default local cache
+/// basename (".zig-cache").
+/// Otherwise, returns `null`, indicating no suitable local cache location.
+pub fn resolveSuitableLocalCacheDir(arena: Allocator, cwd: []const u8) Allocator.Error!?[]u8 {
+ var cur_dir = cwd;
+ while (true) {
+ const joined = try fs.path.join(arena, &.{ cur_dir, Package.build_zig_basename });
+ if (fs.cwd().access(joined, .{})) |_| {
+ return try fs.path.join(arena, &.{ cur_dir, default_local_zig_cache_basename });
+ } else |err| switch (err) {
+ error.FileNotFound => {
+ cur_dir = fs.path.dirname(cur_dir) orelse return null;
+ continue;
+ },
+ else => return null,
+ }
+ }
+}
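// Illustrative sketch (not part of this commit): how a caller might pick a local cache
// directory, falling back to the global cache when no enclosing build.zig is found.
// `cwd_path` and `global_cache_path` are hypothetical values.
const local_cache_path = (try introspect.resolveSuitableLocalCacheDir(arena, cwd_path)) orelse
    global_cache_path;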
src/link.zig
@@ -1675,8 +1675,8 @@ pub fn spawnLld(
const rand_int = std.crypto.random.int(u64);
const rsp_path = "tmp" ++ s ++ std.fmt.hex(rand_int) ++ ".rsp";
- const rsp_file = try comp.local_cache_directory.handle.createFileZ(rsp_path, .{});
- defer comp.local_cache_directory.handle.deleteFileZ(rsp_path) catch |err|
+ const rsp_file = try comp.dirs.local_cache.handle.createFileZ(rsp_path, .{});
+ defer comp.dirs.local_cache.handle.deleteFileZ(rsp_path) catch |err|
log.warn("failed to delete response file {s}: {s}", .{ rsp_path, @errorName(err) });
{
defer rsp_file.close();
@@ -1700,7 +1700,7 @@ pub fn spawnLld(
var rsp_child = std.process.Child.init(&.{ argv[0], argv[1], try std.fmt.allocPrint(
arena,
"@{s}",
- .{try comp.local_cache_directory.join(arena, &.{rsp_path})},
+ .{try comp.dirs.local_cache.join(arena, &.{rsp_path})},
) }, arena);
if (comp.clang_passthrough_mode) {
rsp_child.stdin_behavior = .Inherit;
src/main.zig
@@ -63,19 +63,7 @@ pub fn wasi_cwd() std.os.wasi.fd_t {
return cwd_fd;
}
-fn getWasiPreopen(name: []const u8) Directory {
- return .{
- .path = name,
- .handle = .{
- .fd = wasi_preopens.find(name) orelse fatal("WASI preopen not found: '{s}'", .{name}),
- },
- };
-}
-
-pub fn fatal(comptime format: []const u8, args: anytype) noreturn {
- std.log.err(format, args);
- process.exit(1);
-}
+const fatal = std.process.fatal;
/// Shaming all the locations that inappropriately use an O(N) search algorithm.
/// Please delete this and fix the compilation errors!
@@ -136,7 +124,6 @@ const debug_usage = normal_usage ++
;
const usage = if (build_options.enable_debug_extensions) debug_usage else normal_usage;
-const default_local_zig_cache_basename = ".zig-cache";
var log_scopes: std.ArrayListUnmanaged([]const u8) = .empty;
@@ -377,13 +364,13 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
dev.check(.help_command);
return io.getStdOut().writeAll(usage);
} else if (mem.eql(u8, cmd, "ast-check")) {
- return cmdAstCheck(gpa, arena, cmd_args);
+ return cmdAstCheck(arena, cmd_args);
} else if (mem.eql(u8, cmd, "detect-cpu")) {
- return cmdDetectCpu(gpa, arena, cmd_args);
+ return cmdDetectCpu(cmd_args);
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "changelist")) {
- return cmdChangelist(gpa, arena, cmd_args);
+ return cmdChangelist(arena, cmd_args);
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "dump-zir")) {
- return cmdDumpZir(gpa, arena, cmd_args);
+ return cmdDumpZir(arena, cmd_args);
} else if (build_options.enable_debug_extensions and mem.eql(u8, cmd, "llvm-ints")) {
return cmdDumpLlvmInts(gpa, arena, cmd_args);
} else {
@@ -809,7 +796,8 @@ const Framework = struct {
};
const CliModule = struct {
- paths: Package.Module.CreateOptions.Paths,
+ root_path: []const u8,
+ root_src_path: []const u8,
cc_argv: []const []const u8,
inherited: Package.Module.CreateOptions.Inherited,
target_arch_os_abi: ?[]const u8,
@@ -976,7 +964,7 @@ fn buildOutputType(
// error output consistent. "root" is special.
var create_module: CreateModule = .{
// Populated just before the call to `createModule`.
- .global_cache_directory = undefined,
+ .dirs = undefined,
.object_format = null,
.dynamic_linker = null,
.modules = .{},
@@ -1859,7 +1847,7 @@ fn buildOutputType(
} else root_src_file = arg;
},
.def, .unknown => {
- if (std.ascii.eqlIgnoreCase(".xml", std.fs.path.extension(arg))) {
+ if (std.ascii.eqlIgnoreCase(".xml", fs.path.extension(arg))) {
warn("embedded manifest files must have the extension '.manifest'", .{});
}
fatal("unrecognized file extension of parameter '{s}'", .{arg});
@@ -2924,13 +2912,14 @@ fn buildOutputType(
}
implicit_root_mod: {
- const unresolved_src_path = b: {
+ const src_path = b: {
if (root_src_file) |src_path| {
if (create_module.modules.count() != 0) {
- fatal("main module provided both by '-M{s}={}{s}' and by positional argument '{s}'", .{
+ fatal("main module provided both by '-M{s}={s}{c}{s}' and by positional argument '{s}'", .{
create_module.modules.keys()[0],
- create_module.modules.values()[0].paths.root,
- create_module.modules.values()[0].paths.root_src_path,
+ create_module.modules.values()[0].root_path,
+ fs.path.sep,
+ create_module.modules.values()[0].root_src_path,
src_path,
});
}
@@ -2987,20 +2976,14 @@ fn buildOutputType(
if (mod_opts.error_tracing == true)
create_module.opts.any_error_tracing = true;
- const src_path = try introspect.resolvePath(arena, unresolved_src_path);
const name = switch (arg_mode) {
.zig_test => "test",
.build, .cc, .cpp, .translate_c, .zig_test_obj, .run => fs.path.stem(fs.path.basename(src_path)),
};
try create_module.modules.put(arena, name, .{
- .paths = .{
- .root = .{
- .root_dir = Cache.Directory.cwd(),
- .sub_path = fs.path.dirname(src_path) orelse "",
- },
- .root_src_path = fs.path.basename(src_path),
- },
+ .root_path = fs.path.dirname(src_path) orelse ".",
+ .root_src_path = fs.path.basename(src_path),
.cc_argv = try cc_argv.toOwnedSlice(arena),
.inherited = mod_opts,
.target_arch_os_abi = target_arch_os_abi,
@@ -3036,85 +3019,50 @@ fn buildOutputType(
});
}
- const self_exe_path: ?[]const u8 = if (!process.can_spawn)
- null
- else
- introspect.findZigExePath(arena) catch |err| {
+ const self_exe_path = switch (native_os) {
+ .wasi => {},
+ else => fs.selfExePathAlloc(arena) catch |err| {
fatal("unable to find zig self exe path: {s}", .{@errorName(err)});
- };
-
- var zig_lib_directory: Directory = d: {
- if (override_lib_dir) |unresolved_lib_dir| {
- const lib_dir = try introspect.resolvePath(arena, unresolved_lib_dir);
- break :d .{
- .path = lib_dir,
- .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
- fatal("unable to open zig lib directory '{s}': {s}", .{ lib_dir, @errorName(err) });
- },
- };
- } else if (native_os == .wasi) {
- break :d getWasiPreopen("/lib");
- } else if (self_exe_path) |p| {
- break :d introspect.findZigLibDirFromSelfExe(arena, p) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ p, @errorName(err) });
- };
- } else {
- unreachable;
- }
+ },
};
- defer zig_lib_directory.handle.close();
- var global_cache_directory: Directory = l: {
- if (override_global_cache_dir) |p| {
- break :l .{
- .handle = try fs.cwd().makeOpenPath(p, .{}),
- .path = p,
+ // This `init` calls `fatal` on error.
+ var dirs: Compilation.Directories = .init(
+ arena,
+ override_lib_dir,
+ override_global_cache_dir,
+ s: {
+ if (override_local_cache_dir) |p| break :s .{ .override = p };
+ break :s switch (arg_mode) {
+ .run => .global,
+ else => .search,
};
- }
- if (native_os == .wasi) {
- break :l getWasiPreopen("/cache");
- }
- const p = try introspect.resolveGlobalCacheDir(arena);
- break :l .{
- .handle = try fs.cwd().makeOpenPath(p, .{}),
- .path = p,
- };
- };
- defer global_cache_directory.handle.close();
+ },
+ if (native_os == .wasi) wasi_preopens,
+ self_exe_path,
+ );
+ defer dirs.deinit();
if (linker_optimization) |o| {
warn("ignoring deprecated linker optimization setting '{s}'", .{o});
}
- create_module.global_cache_directory = global_cache_directory;
+ create_module.dirs = dirs;
create_module.opts.emit_llvm_ir = emit_llvm_ir != .no;
create_module.opts.emit_llvm_bc = emit_llvm_bc != .no;
create_module.opts.emit_bin = emit_bin != .no;
create_module.opts.any_c_source_files = create_module.c_source_files.items.len != 0;
- var builtin_modules: std.StringHashMapUnmanaged(*Package.Module) = .empty;
- // `builtin_modules` allocated into `arena`, so no deinit
- const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory, &builtin_modules, color);
+ const main_mod = try createModule(gpa, arena, &create_module, 0, null, color);
for (create_module.modules.keys(), create_module.modules.values()) |key, cli_mod| {
if (cli_mod.resolved == null)
fatal("module '{s}' declared but not used", .{key});
}
- // When you're testing std, the main module is std. In that case,
- // we'll just set the std module to the main one, since avoiding
- // the errors caused by duplicating it is more effort than it's
- // worth.
- const main_mod_is_std = m: {
- const std_path = try fs.path.resolve(arena, &.{
- zig_lib_directory.path orelse ".", "std", "std.zig",
- });
- const main_path = try fs.path.resolve(arena, &.{
- main_mod.root.root_dir.path orelse ".",
- main_mod.root.sub_path,
- main_mod.root_src_path,
- });
- break :m mem.eql(u8, main_path, std_path);
- };
+ // When you're testing std, the main module is std, and we need to avoid duplicating the module.
+ const main_mod_is_std = main_mod.root.root == .zig_lib and
+ mem.eql(u8, main_mod.root.sub_path, "std") and
+ mem.eql(u8, main_mod.root_src_path, "std.zig");
const std_mod = m: {
if (main_mod_is_std) break :m main_mod;
@@ -3126,12 +3074,8 @@ fn buildOutputType(
.zig_test, .zig_test_obj => root_mod: {
const test_mod = if (test_runner_path) |test_runner| test_mod: {
const test_mod = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = Cache.Directory.cwd(),
- .sub_path = fs.path.dirname(test_runner) orelse "",
- },
+ .root = try .fromUnresolved(arena, dirs, &.{fs.path.dirname(test_runner) orelse "."}),
.root_src_path = fs.path.basename(test_runner),
},
.fully_qualified_name = "root",
@@ -3139,18 +3083,12 @@ fn buildOutputType(
.inherited = .{},
.global = create_module.resolved_options,
.parent = main_mod,
- .builtin_mod = main_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is specified
});
test_mod.deps = try main_mod.deps.clone(arena);
break :test_mod test_mod;
} else try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = zig_lib_directory,
- .sub_path = "compiler",
- },
+ .root = try .fromRoot(arena, dirs, .zig_lib, "compiler"),
.root_src_path = "test_runner.zig",
},
.fully_qualified_name = "root",
@@ -3158,8 +3096,6 @@ fn buildOutputType(
.inherited = .{},
.global = create_module.resolved_options,
.parent = main_mod,
- .builtin_mod = main_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is specified
});
break :root_mod test_mod;
@@ -3469,50 +3405,6 @@ fn buildOutputType(
});
defer thread_pool.deinit();
- var cleanup_local_cache_dir: ?fs.Dir = null;
- defer if (cleanup_local_cache_dir) |*dir| dir.close();
-
- var local_cache_directory: Directory = l: {
- if (override_local_cache_dir) |local_cache_dir_path| {
- const dir = try fs.cwd().makeOpenPath(local_cache_dir_path, .{});
- cleanup_local_cache_dir = dir;
- break :l .{
- .handle = dir,
- .path = local_cache_dir_path,
- };
- }
- if (arg_mode == .run) {
- break :l global_cache_directory;
- }
-
- // search upwards from cwd until we find directory with build.zig
- const cwd_path = try process.getCwdAlloc(arena);
- var dirname: []const u8 = cwd_path;
- while (true) {
- const joined_path = try fs.path.join(arena, &.{
- dirname, Package.build_zig_basename,
- });
- if (fs.cwd().access(joined_path, .{})) |_| {
- const cache_dir_path = try fs.path.join(arena, &.{ dirname, default_local_zig_cache_basename });
- const dir = try fs.cwd().makeOpenPath(cache_dir_path, .{});
- cleanup_local_cache_dir = dir;
- break :l .{ .handle = dir, .path = cache_dir_path };
- } else |err| switch (err) {
- error.FileNotFound => {
- dirname = fs.path.dirname(dirname) orelse {
- break :l global_cache_directory;
- };
- continue;
- },
- else => break :l global_cache_directory,
- }
- }
-
- // Otherwise we really don't have a reasonable place to put the local cache directory,
- // so we utilize the global one.
- break :l global_cache_directory;
- };
-
for (create_module.c_source_files.items) |*src| {
if (!mem.eql(u8, src.src_path, "-")) continue;
@@ -3524,14 +3416,14 @@ fn buildOutputType(
const dump_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-dump-stdin{s}", .{
std.crypto.random.int(u64), ext.canonicalName(target),
});
- try local_cache_directory.handle.makePath("tmp");
+ try dirs.local_cache.handle.makePath("tmp");
// Note that in one of the happy paths, execve() is used to switch to
// clang in which case any cleanup logic that exists for this temporary
// file will not run and this temp file will be leaked. The filename
// will be a hash of its contents, so multiple invocations of
// `zig cc -` will result in the same temp file name.
- var f = try local_cache_directory.handle.createFile(dump_path, .{});
+ var f = try dirs.local_cache.handle.createFile(dump_path, .{});
defer f.close();
// Re-using the hasher from Cache, since the functional requirements
@@ -3550,10 +3442,10 @@ fn buildOutputType(
std.fmt.fmtSliceHexLower(&bin_digest),
ext.canonicalName(target),
});
- try local_cache_directory.handle.rename(dump_path, sub_path);
+ try dirs.local_cache.handle.rename(dump_path, sub_path);
// Convert `sub_path` to be relative to current working directory.
- src.src_path = try local_cache_directory.join(arena, &.{sub_path});
+ src.src_path = try dirs.local_cache.join(arena, &.{sub_path});
}
if (build_options.have_llvm and emit_asm != .no) {
@@ -3595,11 +3487,12 @@ fn buildOutputType(
defer file_system_inputs.deinit(gpa);
const comp = Compilation.create(gpa, arena, .{
- .zig_lib_directory = zig_lib_directory,
- .local_cache_directory = local_cache_directory,
- .global_cache_directory = global_cache_directory,
+ .dirs = dirs,
.thread_pool = &thread_pool,
- .self_exe_path = self_exe_path,
+ .self_exe_path = switch (native_os) {
+ .wasi => null,
+ else => self_exe_path,
+ },
.config = create_module.resolved_options,
.root_name = root_name,
.sysroot = create_module.sysroot,
@@ -3757,14 +3650,17 @@ fn buildOutputType(
error.ExportTableAndImportTableConflict => {
fatal("--import-table and --export-table may not be used together", .{});
},
+ error.IllegalZigImport => {
+ fatal("this compiler implementation does not support importing the root source file of a provided module", .{});
+ },
else => fatal("unable to create compilation: {s}", .{@errorName(err)}),
};
var comp_destroyed = false;
defer if (!comp_destroyed) comp.destroy();
if (show_builtin) {
- const builtin_mod = comp.root_mod.getBuiltinDependency();
- const source = builtin_mod.builtin_file.?.source.?;
+ const builtin_opts = comp.root_mod.getBuiltinOptions(comp.config);
+ const source = try builtin_opts.generate(arena);
return std.io.getStdOut().writeAll(source);
}
switch (listen) {
@@ -3844,7 +3740,7 @@ fn buildOutputType(
c_code_directory.path orelse ".", c_code_loc.basename,
});
try test_exec_args.appendSlice(arena, &.{ self_exe_path, "run" });
- if (zig_lib_directory.path) |p| {
+ if (dirs.zig_lib.path) |p| {
try test_exec_args.appendSlice(arena, &.{ "-I", p });
}
@@ -3875,7 +3771,7 @@ fn buildOutputType(
gpa,
arena,
test_exec_args.items,
- self_exe_path.?,
+ self_exe_path,
arg_mode,
&target,
&comp_destroyed,
@@ -3890,7 +3786,7 @@ fn buildOutputType(
}
const CreateModule = struct {
- global_cache_directory: Cache.Directory,
+ dirs: Compilation.Directories,
modules: std.StringArrayHashMapUnmanaged(CliModule),
opts: Compilation.Config.Options,
dynamic_linker: ?[]const u8,
@@ -3937,8 +3833,6 @@ fn createModule(
create_module: *CreateModule,
index: usize,
parent: ?*Package.Module,
- zig_lib_directory: Cache.Directory,
- builtin_modules: *std.StringHashMapUnmanaged(*Package.Module),
color: std.zig.Color,
) Allocator.Error!*Package.Module {
const cli_mod = &create_module.modules.values()[index];
@@ -4069,7 +3963,7 @@ fn createModule(
}
if (target.isMinGW()) {
- const exists = mingw.libExists(arena, target, zig_lib_directory, lib_name) catch |err| {
+ const exists = mingw.libExists(arena, target, create_module.dirs.zig_lib, lib_name) catch |err| {
fatal("failed to check zig installation for DLL import libs: {s}", .{
@errorName(err),
});
@@ -4225,17 +4119,19 @@ fn createModule(
};
}
+ const root: Compilation.Path = try .fromUnresolved(arena, create_module.dirs, &.{cli_mod.root_path});
+
const mod = Package.Module.create(arena, .{
- .global_cache_directory = create_module.global_cache_directory,
- .paths = cli_mod.paths,
+ .paths = .{
+ .root = root,
+ .root_src_path = cli_mod.root_src_path,
+ },
.fully_qualified_name = name,
.cc_argv = cli_mod.cc_argv,
.inherited = cli_mod.inherited,
.global = create_module.resolved_options,
.parent = parent,
- .builtin_mod = null,
- .builtin_modules = builtin_modules,
}) catch |err| switch (err) {
error.ValgrindUnsupportedOnTarget => fatal("unable to create module '{s}': valgrind does not support the selected target CPU architecture", .{name}),
error.TargetRequiresSingleThreaded => fatal("unable to create module '{s}': the selected target does not support multithreading", .{name}),
@@ -4258,7 +4154,7 @@ fn createModule(
for (cli_mod.deps) |dep| {
const dep_index = create_module.modules.getIndex(dep.value) orelse
fatal("module '{s}' depends on non-existent module '{s}'", .{ name, dep.key });
- const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory, builtin_modules, color);
+ const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, color);
try mod.deps.put(arena, dep.key, dep_mod);
}
@@ -4544,15 +4440,13 @@ fn runOrTestHotSwap(
// tmp zig-cache and use it to spawn the child process. This way we are free to update
// the binary with each requested hot update.
.windows => blk: {
- try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.local_cache_directory.handle, lf.emit.sub_path, .{});
- break :blk try fs.path.join(gpa, &[_][]const u8{
- comp.local_cache_directory.path orelse ".", lf.emit.sub_path,
- });
+ try lf.emit.root_dir.handle.copyFile(lf.emit.sub_path, comp.dirs.local_cache.handle, lf.emit.sub_path, .{});
+ break :blk try fs.path.join(gpa, &.{ comp.dirs.local_cache.path orelse ".", lf.emit.sub_path });
},
// A naive `directory.join` here will indeed get the correct path to the binary,
// however, in the case of cwd, we actually want `./foo` so that the path can be executed.
- else => try fs.path.join(gpa, &[_][]const u8{
+ else => try fs.path.join(gpa, &.{
lf.emit.root_dir.path orelse ".", lf.emit.sub_path,
}),
};
@@ -4679,7 +4573,7 @@ fn cmdTranslateC(
},
}
- var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
+ var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
const ext = Compilation.classifyFileExt(c_source_file.src_path);
@@ -4735,7 +4629,7 @@ fn cmdTranslateC(
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
- const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"});
+ const c_headers_dir_path_z = try comp.dirs.zig_lib.joinZ(arena, &.{"include"});
var errors = std.zig.ErrorBundle.empty;
var tree = translate_c.translate(
comp.gpa,
@@ -4787,7 +4681,7 @@ fn cmdTranslateC(
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &hex_digest });
- var o_dir = try comp.local_cache_directory.handle.makeOpenPath(o_sub_path, .{});
+ var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
defer o_dir.close();
var zig_file = try o_dir.createFile(translated_zig_basename, .{});
@@ -4808,9 +4702,9 @@ fn cmdTranslateC(
p.digest = bin_digest;
p.errors = std.zig.ErrorBundle.empty;
} else {
- const out_zig_path = try fs.path.join(arena, &[_][]const u8{ "o", &hex_digest, translated_zig_basename });
- const zig_file = comp.local_cache_directory.handle.openFile(out_zig_path, .{}) catch |err| {
- const path = comp.local_cache_directory.path orelse ".";
+ const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_zig_basename });
+ const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| {
+ const path = comp.dirs.local_cache.path orelse ".";
fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, fs.path.sep_str, out_zig_path, @errorName(err) });
};
defer zig_file.close();
@@ -4854,7 +4748,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var templates = findTemplates(gpa, arena);
defer templates.deinit();
- const cwd_path = try process.getCwdAlloc(arena);
+ const cwd_path = try introspect.getResolvedCwd(arena);
const cwd_basename = fs.path.basename(cwd_path);
const sanitized_root_name = try sanitizeExampleName(arena, cwd_basename);
@@ -4952,7 +4846,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
const argv_index_exe = child_argv.items.len;
_ = try child_argv.addOne();
- const self_exe_path = try introspect.findZigExePath(arena);
+ const self_exe_path = try fs.selfExePathAlloc(arena);
try child_argv.append(self_exe_path);
const argv_index_zig_lib_dir = child_argv.items.len;
@@ -5169,60 +5063,30 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
process.raiseFileDescriptorLimit();
- var zig_lib_directory: Directory = if (override_lib_dir) |lib_dir| .{
- .path = lib_dir,
- .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
- fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) });
- },
- } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
- };
- defer zig_lib_directory.handle.close();
-
- const cwd_path = try process.getCwdAlloc(arena);
- child_argv.items[argv_index_zig_lib_dir] = zig_lib_directory.path orelse cwd_path;
-
+ const cwd_path = try introspect.getResolvedCwd(arena);
const build_root = try findBuildRoot(arena, .{
.cwd_path = cwd_path,
.build_file = build_file,
});
- child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path;
-
- var global_cache_directory: Directory = l: {
- const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
- const dir = fs.cwd().makeOpenPath(p, .{}) catch |err| {
- const base_msg = "unable to open or create the global Zig cache at '{s}': {s}.{s}";
- const extra = "\nIf this location is not writable then consider specifying an " ++
- "alternative with the ZIG_GLOBAL_CACHE_DIR environment variable or the " ++
- "--global-cache-dir option.";
- const show_extra = err == error.AccessDenied or err == error.ReadOnlyFileSystem;
- fatal(base_msg, .{ p, @errorName(err), if (show_extra) extra else "" });
- };
- break :l .{
- .handle = dir,
- .path = p,
- };
- };
- defer global_cache_directory.handle.close();
- child_argv.items[argv_index_global_cache_dir] = global_cache_directory.path orelse cwd_path;
-
- var local_cache_directory: Directory = l: {
- if (override_local_cache_dir) |local_cache_dir_path| {
- break :l .{
- .handle = try fs.cwd().makeOpenPath(local_cache_dir_path, .{}),
- .path = local_cache_dir_path,
- };
- }
- const cache_dir_path = try build_root.directory.join(arena, &.{default_local_zig_cache_basename});
- break :l .{
- .handle = try build_root.directory.handle.makeOpenPath(default_local_zig_cache_basename, .{}),
- .path = cache_dir_path,
- };
- };
- defer local_cache_directory.handle.close();
+ // This `init` calls `fatal` on error.
+ var dirs: Compilation.Directories = .init(
+ arena,
+ override_lib_dir,
+ override_global_cache_dir,
+ .{ .override = path: {
+ if (override_local_cache_dir) |d| break :path d;
+ break :path try build_root.directory.join(arena, &.{introspect.default_local_zig_cache_basename});
+ } },
+ {},
+ self_exe_path,
+ );
+ defer dirs.deinit();
- child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path;
+ child_argv.items[argv_index_zig_lib_dir] = dirs.zig_lib.path orelse cwd_path;
+ child_argv.items[argv_index_build_file] = build_root.directory.path orelse cwd_path;
+ child_argv.items[argv_index_global_cache_dir] = dirs.global_cache.path orelse cwd_path;
+ child_argv.items[argv_index_cache_dir] = dirs.local_cache.path orelse cwd_path;
var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{
@@ -5250,16 +5114,10 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
// big block here to ensure the cleanup gets run when we extract out our argv.
{
const main_mod_paths: Package.Module.CreateOptions.Paths = if (override_build_runner) |runner| .{
- .root = .{
- .root_dir = Cache.Directory.cwd(),
- .sub_path = fs.path.dirname(runner) orelse "",
- },
+ .root = try .fromUnresolved(arena, dirs, &.{fs.path.dirname(runner) orelse "."}),
.root_src_path = fs.path.basename(runner),
} else .{
- .root = .{
- .root_dir = zig_lib_directory,
- .sub_path = "compiler",
- },
+ .root = try .fromRoot(arena, dirs, .zig_lib, "compiler"),
.root_src_path = "build_runner.zig",
};
@@ -5272,7 +5130,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
});
const root_mod = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = main_mod_paths,
.fully_qualified_name = "root",
.cc_argv = &.{},
@@ -5281,16 +5138,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
},
.global = config,
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // all modules will inherit this one's builtin
});
- const builtin_mod = root_mod.getBuiltinDependency();
-
const build_mod = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = .{
- .root = .{ .root_dir = build_root.directory },
+ .root = try .fromUnresolved(arena, dirs, &.{build_root.directory.path orelse "."}),
.root_src_path = build_root.build_zig_basename,
},
.fully_qualified_name = "root.@build",
@@ -5298,8 +5150,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.inherited = .{},
.global = config,
.parent = root_mod,
- .builtin_mod = builtin_mod,
- .builtin_modules = null, // `builtin_mod` is specified
});
var cleanup_build_dir: ?fs.Dir = null;
@@ -5312,7 +5162,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var job_queue: Package.Fetch.JobQueue = .{
.http_client = &http_client,
.thread_pool = &thread_pool,
- .global_cache = global_cache_directory,
+ .global_cache = dirs.global_cache,
.read_only = false,
.recursive = true,
.debug_hash = false,
@@ -5340,14 +5190,16 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
try job_queue.all_fetches.ensureUnusedCapacity(gpa, 1);
try job_queue.table.ensureUnusedCapacity(gpa, 1);
+ const phantom_package_root: Cache.Path = .{ .root_dir = build_root.directory };
+
var fetch: Package.Fetch = .{
.arena = std.heap.ArenaAllocator.init(gpa),
- .location = .{ .relative_path = build_mod.root },
+ .location = .{ .relative_path = phantom_package_root },
.location_tok = 0,
.hash_tok = .none,
.name_tok = 0,
.lazy_status = .eager,
- .parent_package_root = build_mod.root,
+ .parent_package_root = phantom_package_root,
.parent_manifest_ast = null,
.prog_node = fetch_prog_node,
.job_queue = &job_queue,
@@ -5371,7 +5223,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
job_queue.all_fetches.appendAssumeCapacity(&fetch);
job_queue.table.putAssumeCapacityNoClobber(
- Package.Fetch.relativePathDigest(build_mod.root, global_cache_directory),
+ Package.Fetch.relativePathDigest(phantom_package_root, dirs.global_cache),
&fetch,
);
@@ -5397,9 +5249,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
arena,
source_buf.items,
root_mod,
- global_cache_directory,
- local_cache_directory,
- builtin_mod,
+ dirs,
config,
);
@@ -5416,10 +5266,10 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
if (!f.has_build_zig)
continue;
const hash_slice = hash.toSlice();
+ const mod_root_path = try f.package_root.toString(arena);
const m = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = .{
- .root = try f.package_root.clone(arena),
+ .root = try .fromUnresolved(arena, dirs, &.{mod_root_path}),
.root_src_path = Package.build_zig_basename,
},
.fully_qualified_name = try std.fmt.allocPrint(
@@ -5431,8 +5281,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.inherited = .{},
.global = config,
.parent = root_mod,
- .builtin_mod = builtin_mod,
- .builtin_modules = null, // `builtin_mod` is specified
});
const hash_cloned = try arena.dupe(u8, hash_slice);
deps_mod.deps.putAssumeCapacityNoClobber(hash_cloned, m);
@@ -5449,7 +5297,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
for (dep_names, man.dependencies.values()) |name, dep| {
const dep_digest = Package.Fetch.depDigest(
f.package_root,
- global_cache_directory,
+ dirs.global_cache,
dep,
) orelse continue;
const dep_mod = job_queue.table.get(dep_digest).?.module orelse continue;
@@ -5461,18 +5309,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else try createEmptyDependenciesModule(
arena,
root_mod,
- global_cache_directory,
- local_cache_directory,
- builtin_mod,
+ dirs,
config,
);
try root_mod.deps.put(arena, "@build", build_mod);
const comp = Compilation.create(gpa, arena, .{
- .zig_lib_directory = zig_lib_directory,
- .local_cache_directory = local_cache_directory,
- .global_cache_directory = global_cache_directory,
+ .dirs = dirs,
.root_name = "build",
.config = config,
.root_mod = root_mod,
@@ -5507,7 +5351,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
// above, and thus the output file is already closed.
//try comp.makeBinFileExecutable();
child_argv.items[argv_index_exe] =
- try local_cache_directory.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
+ try dirs.local_cache.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
}
if (process.can_spawn) {
@@ -5539,12 +5383,12 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
// that are missing.
const s = fs.path.sep_str;
const tmp_sub_path = "tmp" ++ s ++ results_tmp_file_nonce;
- const stdout = local_cache_directory.handle.readFileAlloc(arena, tmp_sub_path, 50 * 1024 * 1024) catch |err| {
+ const stdout = dirs.local_cache.handle.readFileAlloc(arena, tmp_sub_path, 50 * 1024 * 1024) catch |err| {
fatal("unable to read results of configure phase from '{}{s}': {s}", .{
- local_cache_directory, tmp_sub_path, @errorName(err),
+ dirs.local_cache, tmp_sub_path, @errorName(err),
});
};
- local_cache_directory.handle.deleteFile(tmp_sub_path) catch {};
+ dirs.local_cache.handle.deleteFile(tmp_sub_path) catch {};
var it = mem.splitScalar(u8, stdout, '\n');
var any_errors = false;
@@ -5633,7 +5477,7 @@ fn jitCmd(
.basename = exe_basename,
};
- const self_exe_path = introspect.findZigExePath(arena) catch |err| {
+ const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
fatal("unable to find self exe path: {s}", .{@errorName(err)});
};
@@ -5645,24 +5489,16 @@ fn jitCmd(
const override_lib_dir: ?[]const u8 = try EnvVar.ZIG_LIB_DIR.get(arena);
const override_global_cache_dir: ?[]const u8 = try EnvVar.ZIG_GLOBAL_CACHE_DIR.get(arena);
- var zig_lib_directory: Directory = if (override_lib_dir) |lib_dir| .{
- .path = lib_dir,
- .handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
- fatal("unable to open zig lib directory from 'zig-lib-dir' argument: '{s}': {s}", .{ lib_dir, @errorName(err) });
- },
- } else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
- fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
- };
- defer zig_lib_directory.handle.close();
-
- var global_cache_directory: Directory = l: {
- const p = override_global_cache_dir orelse try introspect.resolveGlobalCacheDir(arena);
- break :l .{
- .handle = try fs.cwd().makeOpenPath(p, .{}),
- .path = p,
- };
- };
- defer global_cache_directory.handle.close();
+ // This `init` calls `fatal` on error.
+ var dirs: Compilation.Directories = .init(
+ arena,
+ override_lib_dir,
+ override_global_cache_dir,
+ .global,
+ if (native_os == .wasi) wasi_preopens,
+ self_exe_path,
+ );
+ defer dirs.deinit();
var thread_pool: ThreadPool = undefined;
try thread_pool.init(.{
@@ -5680,10 +5516,7 @@ fn jitCmd(
// big block here to ensure the cleanup gets run when we extract out our argv.
{
const main_mod_paths: Package.Module.CreateOptions.Paths = .{
- .root = .{
- .root_dir = zig_lib_directory,
- .sub_path = "compiler",
- },
+ .root = try .fromRoot(arena, dirs, .zig_lib, "compiler"),
.root_src_path = options.root_src_path,
};
@@ -5698,7 +5531,6 @@ fn jitCmd(
});
const root_mod = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = main_mod_paths,
.fully_qualified_name = "root",
.cc_argv = &.{},
@@ -5709,18 +5541,12 @@ fn jitCmd(
},
.global = config,
.parent = null,
- .builtin_mod = null,
- .builtin_modules = null, // all modules will inherit this one's builtin
});
if (options.depend_on_aro) {
const aro_mod = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = zig_lib_directory,
- .sub_path = "compiler/aro",
- },
+ .root = try .fromRoot(arena, dirs, .zig_lib, "compiler/aro"),
.root_src_path = "aro.zig",
},
.fully_qualified_name = "aro",
@@ -5732,16 +5558,12 @@ fn jitCmd(
},
.global = config,
.parent = null,
- .builtin_mod = root_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is specified
});
try root_mod.deps.put(arena, "aro", aro_mod);
}
const comp = Compilation.create(gpa, arena, .{
- .zig_lib_directory = zig_lib_directory,
- .local_cache_directory = global_cache_directory,
- .global_cache_directory = global_cache_directory,
+ .dirs = dirs,
.root_name = options.cmd_name,
.config = config,
.root_mod = root_mod,
@@ -5778,16 +5600,16 @@ fn jitCmd(
};
}
- const exe_path = try global_cache_directory.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
+ const exe_path = try dirs.global_cache.join(arena, &.{comp.cache_use.whole.bin_sub_path.?});
child_argv.appendAssumeCapacity(exe_path);
}
if (options.prepend_zig_lib_dir_path)
- child_argv.appendAssumeCapacity(zig_lib_directory.path.?);
+ child_argv.appendAssumeCapacity(dirs.zig_lib.path.?);
if (options.prepend_zig_exe_path)
child_argv.appendAssumeCapacity(self_exe_path);
if (options.prepend_global_cache_path)
- child_argv.appendAssumeCapacity(global_cache_directory.path.?);
+ child_argv.appendAssumeCapacity(dirs.global_cache.path.?);
child_argv.appendSliceAssumeCapacity(args);
@@ -6270,7 +6092,6 @@ const usage_ast_check =
;
fn cmdAstCheck(
- gpa: Allocator,
arena: Allocator,
args: []const []const u8,
) !void {
@@ -6281,7 +6102,7 @@ fn cmdAstCheck(
var color: Color = .auto;
var want_output_text = false;
var force_zon = false;
- var zig_source_file: ?[]const u8 = null;
+ var zig_source_path: ?[]const u8 = null;
var i: usize = 0;
while (i < args.len) : (i += 1) {
@@ -6306,96 +6127,55 @@ fn cmdAstCheck(
} else {
fatal("unrecognized parameter: '{s}'", .{arg});
}
- } else if (zig_source_file == null) {
- zig_source_file = arg;
+ } else if (zig_source_path == null) {
+ zig_source_path = arg;
} else {
fatal("extra positional parameter: '{s}'", .{arg});
}
}
- var file: Zcu.File = .{
- .status = .never_loaded,
- .sub_file_path = undefined,
- .stat = undefined,
- .source = null,
- .tree = null,
- .zir = null,
- .zoir = null,
- .mod = undefined,
- };
- if (zig_source_file) |file_name| {
- var f = fs.cwd().openFile(file_name, .{}) catch |err| {
- fatal("unable to open file for ast-check '{s}': {s}", .{ file_name, @errorName(err) });
- };
- defer f.close();
-
- const stat = try f.stat();
-
- if (stat.size > std.zig.max_src_size)
- return error.FileTooBig;
-
- const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
- const amt = try f.readAll(source);
- if (amt != stat.size)
- return error.UnexpectedEndOfFile;
-
- file.sub_file_path = file_name;
- file.source = source;
- file.stat = .{
- .size = stat.size,
- .inode = stat.inode,
- .mtime = stat.mtime,
- };
- } else {
- const stdin = io.getStdIn();
- const source = std.zig.readSourceFileToEndAlloc(arena, stdin, null) catch |err| {
- fatal("unable to read stdin: {}", .{err});
+ const display_path = zig_source_path orelse "<stdin>";
+ const source: [:0]const u8 = s: {
+ var f = if (zig_source_path) |p| file: {
+ break :file fs.cwd().openFile(p, .{}) catch |err| {
+ fatal("unable to open file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
+ };
+ } else io.getStdIn();
+ defer if (zig_source_path != null) f.close();
+ break :s std.zig.readSourceFileToEndAlloc(arena, f, null) catch |err| {
+ fatal("unable to load file '{s}' for ast-check: {s}", .{ display_path, @errorName(err) });
};
- file.sub_file_path = "<stdin>";
- file.source = source;
- file.stat.size = source.len;
- }
+ };
const mode: Ast.Mode = mode: {
if (force_zon) break :mode .zon;
- if (zig_source_file) |name| {
- if (mem.endsWith(u8, name, ".zon")) {
+ if (zig_source_path) |path| {
+ if (mem.endsWith(u8, path, ".zon")) {
break :mode .zon;
}
}
break :mode .zig;
};
- file.mod = try Package.Module.createLimited(arena, .{
- .root = Path.cwd(),
- .root_src_path = file.sub_file_path,
- .fully_qualified_name = "root",
- });
-
- file.tree = try Ast.parse(gpa, file.source.?, mode);
- defer file.tree.?.deinit(gpa);
+ const tree = try Ast.parse(arena, source, mode);
switch (mode) {
.zig => {
- file.zir = try AstGen.generate(gpa, file.tree.?);
- defer file.zir.?.deinit(gpa);
+ const zir = try AstGen.generate(arena, tree);
- if (file.zir.?.hasCompileErrors()) {
+ if (zir.hasCompileErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
- try wip_errors.init(gpa);
- defer wip_errors.deinit();
- try Compilation.addZirErrorMessages(&wip_errors, &file);
+ try wip_errors.init(arena);
+ try wip_errors.addZirErrorMessages(zir, tree, source, display_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(color.renderOptions());
-
- if (file.zir.?.loweringFailed()) {
+ if (zir.loweringFailed()) {
process.exit(1);
}
}
if (!want_output_text) {
- if (file.zir.?.hasCompileErrors()) {
+ if (zir.hasCompileErrors()) {
process.exit(1);
} else {
return cleanExit();
@@ -6407,20 +6187,20 @@ fn cmdAstCheck(
{
const token_bytes = @sizeOf(Ast.TokenList) +
- file.tree.?.tokens.len * (@sizeOf(std.zig.Token.Tag) + @sizeOf(Ast.ByteOffset));
- const tree_bytes = @sizeOf(Ast) + file.tree.?.nodes.len *
+ tree.tokens.len * (@sizeOf(std.zig.Token.Tag) + @sizeOf(Ast.ByteOffset));
+ const tree_bytes = @sizeOf(Ast) + tree.nodes.len *
(@sizeOf(Ast.Node.Tag) +
@sizeOf(Ast.TokenIndex) +
// Here we don't use @sizeOf(Ast.Node.Data) because it would include
// the debug safety tag but we want to measure release size.
8);
- const instruction_bytes = file.zir.?.instructions.len *
+ const instruction_bytes = zir.instructions.len *
// Here we don't use @sizeOf(Zir.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
(@sizeOf(Zir.Inst.Tag) + 8);
- const extra_bytes = file.zir.?.extra.len * @sizeOf(u32);
+ const extra_bytes = zir.extra.len * @sizeOf(u32);
const total_bytes = @sizeOf(Zir) + instruction_bytes + extra_bytes +
- file.zir.?.string_bytes.len * @sizeOf(u8);
+ zir.string_bytes.len * @sizeOf(u8);
const stdout = io.getStdOut();
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
// zig fmt: off
@@ -6434,44 +6214,33 @@ fn cmdAstCheck(
\\# Extra Data Items: {d} ({})
\\
, .{
- fmtIntSizeBin(file.source.?.len),
- file.tree.?.tokens.len, fmtIntSizeBin(token_bytes),
- file.tree.?.nodes.len, fmtIntSizeBin(tree_bytes),
+ fmtIntSizeBin(source.len),
+ tree.tokens.len, fmtIntSizeBin(token_bytes),
+ tree.nodes.len, fmtIntSizeBin(tree_bytes),
fmtIntSizeBin(total_bytes),
- file.zir.?.instructions.len, fmtIntSizeBin(instruction_bytes),
- fmtIntSizeBin(file.zir.?.string_bytes.len),
- file.zir.?.extra.len, fmtIntSizeBin(extra_bytes),
+ zir.instructions.len, fmtIntSizeBin(instruction_bytes),
+ fmtIntSizeBin(zir.string_bytes.len),
+ zir.extra.len, fmtIntSizeBin(extra_bytes),
});
// zig fmt: on
}
- try @import("print_zir.zig").renderAsTextToFile(gpa, &file, io.getStdOut());
+ try @import("print_zir.zig").renderAsTextToFile(arena, tree, zir, io.getStdOut());
- if (file.zir.?.hasCompileErrors()) {
+ if (zir.hasCompileErrors()) {
process.exit(1);
} else {
return cleanExit();
}
},
.zon => {
- const zoir = try ZonGen.generate(gpa, file.tree.?, .{});
- defer zoir.deinit(gpa);
-
+ const zoir = try ZonGen.generate(arena, tree, .{});
if (zoir.hasCompileErrors()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
- try wip_errors.init(gpa);
- defer wip_errors.deinit();
-
- {
- const src_path = try file.fullPath(gpa);
- defer gpa.free(src_path);
- try wip_errors.addZoirErrorMessages(zoir, file.tree.?, file.source.?, src_path);
- }
-
+ try wip_errors.init(arena);
+ try wip_errors.addZoirErrorMessages(zoir, tree, source, display_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(color.renderOptions());
-
process.exit(1);
}
@@ -6489,16 +6258,9 @@ fn cmdAstCheck(
}
}
-fn cmdDetectCpu(
- gpa: Allocator,
- arena: Allocator,
- args: []const []const u8,
-) !void {
+fn cmdDetectCpu(args: []const []const u8) !void {
dev.check(.detect_cpu_command);
- _ = gpa;
- _ = arena;
-
const detect_cpu_usage =
\\Usage: zig detect-cpu [--llvm]
\\
@@ -6676,13 +6438,11 @@ fn cmdDumpLlvmInts(
/// This is only enabled for debug builds.
fn cmdDumpZir(
- gpa: Allocator,
arena: Allocator,
args: []const []const u8,
) !void {
dev.check(.dump_zir_command);
- _ = arena;
const Zir = std.zig.Zir;
const cache_file = args[0];
@@ -6692,26 +6452,16 @@ fn cmdDumpZir(
};
defer f.close();
- var file: Zcu.File = .{
- .status = .never_loaded,
- .sub_file_path = undefined,
- .stat = undefined,
- .source = null,
- .tree = null,
- .zir = try Zcu.loadZirCache(gpa, f),
- .zoir = null,
- .mod = undefined,
- };
- defer file.zir.?.deinit(gpa);
+ const zir = try Zcu.loadZirCache(arena, f);
{
- const instruction_bytes = file.zir.?.instructions.len *
+ const instruction_bytes = zir.instructions.len *
// Here we don't use @sizeOf(Zir.Inst.Data) because it would include
// the debug safety tag but we want to measure release size.
(@sizeOf(Zir.Inst.Tag) + 8);
- const extra_bytes = file.zir.?.extra.len * @sizeOf(u32);
+ const extra_bytes = zir.extra.len * @sizeOf(u32);
const total_bytes = @sizeOf(Zir) + instruction_bytes + extra_bytes +
- file.zir.?.string_bytes.len * @sizeOf(u8);
+ zir.string_bytes.len * @sizeOf(u8);
const stdout = io.getStdOut();
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
// zig fmt: off
@@ -6723,19 +6473,18 @@ fn cmdDumpZir(
\\
, .{
fmtIntSizeBin(total_bytes),
- file.zir.?.instructions.len, fmtIntSizeBin(instruction_bytes),
- fmtIntSizeBin(file.zir.?.string_bytes.len),
- file.zir.?.extra.len, fmtIntSizeBin(extra_bytes),
+ zir.instructions.len, fmtIntSizeBin(instruction_bytes),
+ fmtIntSizeBin(zir.string_bytes.len),
+ zir.extra.len, fmtIntSizeBin(extra_bytes),
});
// zig fmt: on
}
- return @import("print_zir.zig").renderAsTextToFile(gpa, &file, io.getStdOut());
+ return @import("print_zir.zig").renderAsTextToFile(arena, null, zir, io.getStdOut());
}
/// This is only enabled for debug builds.
fn cmdChangelist(
- gpa: Allocator,
arena: Allocator,
args: []const []const u8,
) !void {
@@ -6744,101 +6493,50 @@ fn cmdChangelist(
const color: Color = .auto;
const Zir = std.zig.Zir;
- const old_source_file = args[0];
- const new_source_file = args[1];
+ const old_source_path = args[0];
+ const new_source_path = args[1];
- var f = fs.cwd().openFile(old_source_file, .{}) catch |err| {
- fatal("unable to open old source file for comparison '{s}': {s}", .{ old_source_file, @errorName(err) });
+ const old_source = source: {
+ var f = fs.cwd().openFile(old_source_path, .{}) catch |err|
+ fatal("unable to open old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
+ defer f.close();
+ break :source std.zig.readSourceFileToEndAlloc(arena, f, std.zig.max_src_size) catch |err|
+ fatal("unable to read old source file '{s}': {s}", .{ old_source_path, @errorName(err) });
};
- defer f.close();
-
- const stat = try f.stat();
-
- if (stat.size > std.zig.max_src_size)
- return error.FileTooBig;
-
- var file: Zcu.File = .{
- .status = .never_loaded,
- .sub_file_path = old_source_file,
- .stat = .{
- .size = stat.size,
- .inode = stat.inode,
- .mtime = stat.mtime,
- },
- .source = null,
- .tree = null,
- .zir = null,
- .zoir = null,
- .mod = undefined,
+ const new_source = source: {
+ var f = fs.cwd().openFile(new_source_path, .{}) catch |err|
+ fatal("unable to open new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
+ defer f.close();
+ break :source std.zig.readSourceFileToEndAlloc(arena, f, std.zig.max_src_size) catch |err|
+ fatal("unable to read new source file '{s}': {s}", .{ new_source_path, @errorName(err) });
};
- file.mod = try Package.Module.createLimited(arena, .{
- .root = Path.cwd(),
- .root_src_path = file.sub_file_path,
- .fully_qualified_name = "root",
- });
-
- const source = try arena.allocSentinel(u8, @as(usize, @intCast(stat.size)), 0);
- const amt = try f.readAll(source);
- if (amt != stat.size)
- return error.UnexpectedEndOfFile;
- file.source = source;
-
- file.tree = try Ast.parse(gpa, file.source.?, .zig);
- defer file.tree.?.deinit(gpa);
-
- file.zir = try AstGen.generate(gpa, file.tree.?);
- defer file.zir.?.deinit(gpa);
+ const old_tree = try Ast.parse(arena, old_source, .zig);
+ const old_zir = try AstGen.generate(arena, old_tree);
- if (file.zir.?.loweringFailed()) {
+ if (old_zir.loweringFailed()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
- try wip_errors.init(gpa);
- defer wip_errors.deinit();
- try Compilation.addZirErrorMessages(&wip_errors, &file);
+ try wip_errors.init(arena);
+ try wip_errors.addZirErrorMessages(old_zir, old_tree, old_source, old_source_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(color.renderOptions());
process.exit(1);
}
- var new_f = fs.cwd().openFile(new_source_file, .{}) catch |err| {
- fatal("unable to open new source file for comparison '{s}': {s}", .{ new_source_file, @errorName(err) });
- };
- defer new_f.close();
-
- const new_stat = try new_f.stat();
-
- if (new_stat.size > std.zig.max_src_size)
- return error.FileTooBig;
-
- const new_source = try arena.allocSentinel(u8, @as(usize, @intCast(new_stat.size)), 0);
- const new_amt = try new_f.readAll(new_source);
- if (new_amt != new_stat.size)
- return error.UnexpectedEndOfFile;
+ const new_tree = try Ast.parse(arena, new_source, .zig);
+ const new_zir = try AstGen.generate(arena, new_tree);
- var new_tree = try Ast.parse(gpa, new_source, .zig);
- defer new_tree.deinit(gpa);
-
- var old_zir = file.zir.?;
- defer old_zir.deinit(gpa);
- file.zir = null;
- file.zir = try AstGen.generate(gpa, new_tree);
-
- if (file.zir.?.loweringFailed()) {
+ if (new_zir.loweringFailed()) {
var wip_errors: std.zig.ErrorBundle.Wip = undefined;
- try wip_errors.init(gpa);
- defer wip_errors.deinit();
- try Compilation.addZirErrorMessages(&wip_errors, &file);
+ try wip_errors.init(arena);
+ try wip_errors.addZirErrorMessages(new_zir, new_tree, new_source, new_source_path);
var error_bundle = try wip_errors.toOwnedBundle("");
- defer error_bundle.deinit(gpa);
error_bundle.renderToStdErr(color.renderOptions());
process.exit(1);
}
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
- defer inst_map.deinit(gpa);
-
- try Zcu.mapOldZirToNew(gpa, old_zir, file.zir.?, &inst_map);
+ try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map);
var bw = io.bufferedWriter(io.getStdOut().writer());
const stdout = bw.writer();
@@ -7315,7 +7013,7 @@ fn cmdFetch(
},
};
- const cwd_path = try process.getCwdAlloc(arena);
+ const cwd_path = try introspect.getResolvedCwd(arena);
var build_root = try findBuildRoot(arena, .{
.cwd_path = cwd_path,
@@ -7447,9 +7145,7 @@ fn cmdFetch(
fn createEmptyDependenciesModule(
arena: Allocator,
main_mod: *Package.Module,
- global_cache_directory: Cache.Directory,
- local_cache_directory: Cache.Directory,
- builtin_mod: *Package.Module,
+ dirs: Compilation.Directories,
global_options: Compilation.Config,
) !void {
var source = std.ArrayList(u8).init(arena);
@@ -7458,9 +7154,7 @@ fn createEmptyDependenciesModule(
arena,
source.items,
main_mod,
- global_cache_directory,
- local_cache_directory,
- builtin_mod,
+ dirs,
global_options,
);
}
@@ -7471,9 +7165,7 @@ fn createDependenciesModule(
arena: Allocator,
source: []const u8,
main_mod: *Package.Module,
- global_cache_directory: Cache.Directory,
- local_cache_directory: Cache.Directory,
- builtin_mod: *Package.Module,
+ dirs: Compilation.Directories,
global_options: Compilation.Config,
) !*Package.Module {
// Atomically create the file in a directory named after the hash of its contents.
@@ -7481,7 +7173,7 @@ fn createDependenciesModule(
const rand_int = std.crypto.random.int(u64);
const tmp_dir_sub_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
{
- var tmp_dir = try local_cache_directory.handle.makeOpenPath(tmp_dir_sub_path, .{});
+ var tmp_dir = try dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
defer tmp_dir.close();
try tmp_dir.writeFile(.{ .sub_path = basename, .data = source });
}
@@ -7493,18 +7185,14 @@ fn createDependenciesModule(
const o_dir_sub_path = try arena.dupe(u8, "o" ++ fs.path.sep_str ++ hex_digest);
try Package.Fetch.renameTmpIntoCache(
- local_cache_directory.handle,
+ dirs.local_cache.handle,
tmp_dir_sub_path,
o_dir_sub_path,
);
const deps_mod = try Package.Module.create(arena, .{
- .global_cache_directory = global_cache_directory,
.paths = .{
- .root = .{
- .root_dir = local_cache_directory,
- .sub_path = o_dir_sub_path,
- },
+ .root = try .fromRoot(arena, dirs, .local_cache, o_dir_sub_path),
.root_src_path = basename,
},
.fully_qualified_name = "root.@dependencies",
@@ -7512,8 +7200,6 @@ fn createDependenciesModule(
.cc_argv = &.{},
.inherited = .{},
.global = global_options,
- .builtin_mod = builtin_mod,
- .builtin_modules = null, // `builtin_mod` is specified
});
try main_mod.deps.put(arena, "@dependencies", deps_mod);
return deps_mod;
@@ -7536,7 +7222,7 @@ const FindBuildRootOptions = struct {
};
fn findBuildRoot(arena: Allocator, options: FindBuildRootOptions) !BuildRoot {
- const cwd_path = options.cwd_path orelse try process.getCwdAlloc(arena);
+ const cwd_path = options.cwd_path orelse try introspect.getResolvedCwd(arena);
const build_zig_basename = if (options.build_file) |bf|
fs.path.basename(bf)
else
@@ -7723,10 +7409,13 @@ const Templates = struct {
};
fn findTemplates(gpa: Allocator, arena: Allocator) Templates {
- const self_exe_path = introspect.findZigExePath(arena) catch |err| {
+ const cwd_path = introspect.getResolvedCwd(arena) catch |err| {
+ fatal("unable to get cwd: {s}", .{@errorName(err)});
+ };
+ const self_exe_path = fs.selfExePathAlloc(arena) catch |err| {
fatal("unable to find self exe path: {s}", .{@errorName(err)});
};
- var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
+ var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, cwd_path, self_exe_path) catch |err| {
fatal("unable to find zig installation directory '{s}': {s}", .{ self_exe_path, @errorName(err) });
};
@@ -7783,8 +7472,8 @@ fn handleModArg(
const gop = try create_module.modules.getOrPut(arena, mod_name);
if (gop.found_existing) {
- fatal("unable to add module '{s}': already exists as '{s}'", .{
- mod_name, gop.value_ptr.paths.root_src_path,
+ fatal("unable to add module '{s}': already exists as '{s}{c}{s}'", .{
+ mod_name, gop.value_ptr.root_path, fs.path.sep, gop.value_ptr.root_src_path,
});
}
@@ -7811,24 +7500,14 @@ fn handleModArg(
if (mod_opts.error_tracing == true)
create_module.opts.any_error_tracing = true;
+ const root_path: []const u8, const root_src_path: []const u8 = if (opt_root_src_orig) |path| root: {
+ create_module.opts.have_zcu = true;
+ break :root .{ fs.path.dirname(path) orelse ".", fs.path.basename(path) };
+ } else .{ ".", "" };
+
gop.value_ptr.* = .{
- .paths = p: {
- if (opt_root_src_orig) |root_src_orig| {
- create_module.opts.have_zcu = true;
- const root_src = try introspect.resolvePath(arena, root_src_orig);
- break :p .{
- .root = .{
- .root_dir = Cache.Directory.cwd(),
- .sub_path = fs.path.dirname(root_src) orelse "",
- },
- .root_src_path = fs.path.basename(root_src),
- };
- }
- break :p .{
- .root = .{ .root_dir = Cache.Directory.cwd() },
- .root_src_path = "",
- };
- },
+ .root_path = root_path,
+ .root_src_path = root_src_path,
.cc_argv = try cc_argv.toOwnedSlice(arena),
.inherited = mod_opts.*,
.target_arch_os_abi = target_arch_os_abi.*,
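
The main.zig hunks above repeatedly swap the old local `fatal` wrapper and `introspect.findZigExePath` for the standard library's `std.process.fatal` and `std.fs.selfExePathAlloc`. A minimal standalone sketch of that pattern follows; it is illustrative only and not part of the commit, and everything in it comes from the Zig standard library.

```zig
// Sketch: the std helpers this change moves main.zig onto.
// `std.process.fatal` logs the formatted message and exits nonzero;
// `std.fs.selfExePathAlloc` allocates the path of the running executable.
const std = @import("std");

pub fn main() void {
    var arena_state = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();

    const self_exe_path = std.fs.selfExePathAlloc(arena) catch |err|
        std.process.fatal("unable to find self exe path: {s}", .{@errorName(err)});
    std.debug.print("running from: {s}\n", .{self_exe_path});
}
```
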
src/print_env.zig
@@ -2,13 +2,14 @@ const std = @import("std");
const build_options = @import("build_options");
const introspect = @import("introspect.zig");
const Allocator = std.mem.Allocator;
-const fatal = @import("main.zig").fatal;
+const fatal = std.process.fatal;
pub fn cmdEnv(arena: Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
_ = args;
- const self_exe_path = try introspect.findZigExePath(arena);
+ const cwd_path = try introspect.getResolvedCwd(arena);
+ const self_exe_path = try std.fs.selfExePathAlloc(arena);
- var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
+ var zig_lib_directory = introspect.findZigLibDirFromSelfExe(arena, cwd_path, self_exe_path) catch |err| {
fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
};
defer zig_lib_directory.handle.close();
src/print_targets.zig
@@ -3,13 +3,13 @@ const fs = std.fs;
const io = std.io;
const mem = std.mem;
const meta = std.meta;
+const fatal = std.process.fatal;
const Allocator = std.mem.Allocator;
const Target = std.Target;
const target = @import("target.zig");
const assert = std.debug.assert;
const glibc = @import("libs/glibc.zig");
const introspect = @import("introspect.zig");
-const fatal = @import("main.zig").fatal;
pub fn cmdTargets(
allocator: Allocator,
src/print_zir.zig
@@ -12,7 +12,8 @@ const LazySrcLoc = Zcu.LazySrcLoc;
/// Write human-readable, debug formatted ZIR code to a file.
pub fn renderAsTextToFile(
gpa: Allocator,
- scope_file: *Zcu.File,
+ tree: ?Ast,
+ zir: Zir,
fs_file: std.fs.File,
) !void {
var arena = std.heap.ArenaAllocator.init(gpa);
@@ -21,8 +22,8 @@ pub fn renderAsTextToFile(
var writer: Writer = .{
.gpa = gpa,
.arena = arena.allocator(),
- .file = scope_file,
- .code = scope_file.zir.?,
+ .tree = tree,
+ .code = zir,
.indent = 0,
.parent_decl_node = .root,
.recurse_decls = true,
@@ -36,18 +37,18 @@ pub fn renderAsTextToFile(
try stream.print("%{d} ", .{@intFromEnum(main_struct_inst)});
try writer.writeInstToStream(stream, main_struct_inst);
try stream.writeAll("\n");
- const imports_index = scope_file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
+ const imports_index = zir.extra[@intFromEnum(Zir.ExtraIndex.imports)];
if (imports_index != 0) {
try stream.writeAll("Imports:\n");
- const extra = scope_file.zir.?.extraData(Zir.Inst.Imports, imports_index);
+ const extra = zir.extraData(Zir.Inst.Imports, imports_index);
var extra_index = extra.end;
for (0..extra.data.imports_len) |_| {
- const item = scope_file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index);
+ const item = zir.extraData(Zir.Inst.Imports.Item, extra_index);
extra_index = item.end;
- const import_path = scope_file.zir.?.nullTerminatedString(item.data.name);
+ const import_path = zir.nullTerminatedString(item.data.name);
try stream.print(" @import(\"{}\") ", .{
std.zig.fmtEscapes(import_path),
});
@@ -74,7 +75,7 @@ pub fn renderInstructionContext(
var writer: Writer = .{
.gpa = gpa,
.arena = arena.allocator(),
- .file = scope_file,
+ .tree = scope_file.tree,
.code = scope_file.zir.?,
.indent = if (indent < 2) 2 else indent,
.parent_decl_node = parent_decl_node,
@@ -106,7 +107,7 @@ pub fn renderSingleInstruction(
var writer: Writer = .{
.gpa = gpa,
.arena = arena.allocator(),
- .file = scope_file,
+ .tree = scope_file.tree,
.code = scope_file.zir.?,
.indent = indent,
.parent_decl_node = parent_decl_node,
@@ -121,7 +122,7 @@ pub fn renderSingleInstruction(
const Writer = struct {
gpa: Allocator,
arena: Allocator,
- file: *Zcu.File,
+ tree: ?Ast,
code: Zir,
indent: u32,
parent_decl_node: Ast.Node.Index,
@@ -2761,7 +2762,7 @@ const Writer = struct {
}
fn writeSrcNode(self: *Writer, stream: anytype, src_node: Ast.Node.Offset) !void {
- const tree = self.file.tree orelse return;
+ const tree = self.tree orelse return;
const abs_node = src_node.toAbsolute(self.parent_decl_node);
const src_span = tree.nodeToSpan(abs_node);
const start = self.line_col_cursor.find(tree.source, src_span.start);
@@ -2773,7 +2774,7 @@ const Writer = struct {
}
fn writeSrcTok(self: *Writer, stream: anytype, src_tok: Ast.TokenOffset) !void {
- const tree = self.file.tree orelse return;
+ const tree = self.tree orelse return;
const abs_tok = src_tok.toAbsolute(tree.firstToken(self.parent_decl_node));
const span_start = tree.tokenStart(abs_tok);
const span_end = span_start + @as(u32, @intCast(tree.tokenSlice(abs_tok).len));
@@ -2786,7 +2787,7 @@ const Writer = struct {
}
fn writeSrcTokAbs(self: *Writer, stream: anytype, src_tok: Ast.TokenIndex) !void {
- const tree = self.file.tree orelse return;
+ const tree = self.tree orelse return;
const span_start = tree.tokenStart(src_tok);
const span_end = span_start + @as(u32, @intCast(tree.tokenSlice(src_tok).len));
const start = self.line_col_cursor.find(tree.source, span_start);
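
With this change the ZIR renderer no longer needs a `*Zcu.File`; callers pass the `Ast` (optionally) and the `Zir` directly, as `cmdAstCheck` and `cmdDumpZir` now do. A rough sketch of driving the renderer with the new signature; it assumes compilation inside the compiler's `src/` directory so `@import("print_zir.zig")` resolves, and takes the four-argument `renderAsTextToFile` shape from the hunks above rather than from any documented API.

```zig
// Sketch: parse source, lower to ZIR, and render it with the decoupled API.
// `std.zig.Ast.parse` and `std.zig.AstGen.generate` are std APIs; the
// renderAsTextToFile(gpa, tree, zir, file) signature is the one introduced here.
const std = @import("std");
const print_zir = @import("print_zir.zig");

fn dumpZirForSource(gpa: std.mem.Allocator, source: [:0]const u8) !void {
    var tree = try std.zig.Ast.parse(gpa, source, .zig);
    defer tree.deinit(gpa);

    var zir = try std.zig.AstGen.generate(gpa, tree);
    defer zir.deinit(gpa);

    // Passing the tree is optional; `null` skips source locations, as in cmdDumpZir.
    try print_zir.renderAsTextToFile(gpa, tree, zir, std.io.getStdOut());
}
```
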
src/Sema.zig
@@ -829,7 +829,7 @@ pub const Block = struct {
pub fn ownerModule(block: Block) *Package.Module {
const zcu = block.sema.pt.zcu;
- return zcu.namespacePtr(block.namespace).fileScope(zcu).mod;
+ return zcu.namespacePtr(block.namespace).fileScope(zcu).mod.?;
}
fn trackZir(block: *Block, inst: Zir.Inst.Index) Allocator.Error!InternPool.TrackedInst.Index {
@@ -1127,10 +1127,10 @@ fn analyzeBodyInner(
// The hashmap lookup in here is a little expensive, and LLVM fails to optimize it away.
if (build_options.enable_logging) {
- std.log.scoped(.sema_zir).debug("sema ZIR {s} %{d}", .{ sub_file_path: {
+ std.log.scoped(.sema_zir).debug("sema ZIR {} %{d}", .{ path: {
const file_index = block.src_base_inst.resolveFile(&zcu.intern_pool);
const file = zcu.fileByIndex(file_index);
- break :sub_file_path file.sub_file_path;
+ break :path file.path.fmt(zcu.comp);
}, inst });
}
@@ -6162,50 +6162,67 @@ fn zirCImport(sema: *Sema, parent_block: *Block, inst: Zir.Inst.Index) CompileEr
}
const parent_mod = parent_block.ownerModule();
const digest = Cache.binToHex(c_import_res.digest);
- const c_import_zig_path = try comp.arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ digest);
- const c_import_mod = Package.Module.create(comp.arena, .{
- .global_cache_directory = comp.global_cache_directory,
- .paths = .{
- .root = .{
- .root_dir = comp.local_cache_directory,
- .sub_path = c_import_zig_path,
- },
- .root_src_path = "cimport.zig",
- },
- .fully_qualified_name = c_import_zig_path,
- .cc_argv = parent_mod.cc_argv,
- .inherited = .{},
- .global = comp.config,
- .parent = parent_mod,
- .builtin_mod = parent_mod.getBuiltinDependency(),
- .builtin_modules = null, // `builtin_mod` is set
- }) catch |err| switch (err) {
- // None of these are possible because we are creating a package with
- // the exact same configuration as the parent package, which already
- // passed these checks.
- error.ValgrindUnsupportedOnTarget => unreachable,
- error.TargetRequiresSingleThreaded => unreachable,
- error.BackendRequiresSingleThreaded => unreachable,
- error.TargetRequiresPic => unreachable,
- error.PieRequiresPic => unreachable,
- error.DynamicLinkingRequiresPic => unreachable,
- error.TargetHasNoRedZone => unreachable,
- error.StackCheckUnsupportedByTarget => unreachable,
- error.StackProtectorUnsupportedByTarget => unreachable,
- error.StackProtectorUnavailableWithoutLibC => unreachable,
- else => |e| return e,
- };
+ const new_file_index = file: {
+ const c_import_zig_path = try comp.arena.dupe(u8, "o" ++ std.fs.path.sep_str ++ digest);
+ const c_import_mod = Package.Module.create(comp.arena, .{
+ .paths = .{
+ .root = try .fromRoot(comp.arena, comp.dirs, .local_cache, c_import_zig_path),
+ .root_src_path = "cimport.zig",
+ },
+ .fully_qualified_name = c_import_zig_path,
+ .cc_argv = parent_mod.cc_argv,
+ .inherited = .{},
+ .global = comp.config,
+ .parent = parent_mod,
+ }) catch |err| switch (err) {
+ // None of these are possible because we are creating a package with
+ // the exact same configuration as the parent package, which already
+ // passed these checks.
+ error.ValgrindUnsupportedOnTarget => unreachable,
+ error.TargetRequiresSingleThreaded => unreachable,
+ error.BackendRequiresSingleThreaded => unreachable,
+ error.TargetRequiresPic => unreachable,
+ error.PieRequiresPic => unreachable,
+ error.DynamicLinkingRequiresPic => unreachable,
+ error.TargetHasNoRedZone => unreachable,
+ error.StackCheckUnsupportedByTarget => unreachable,
+ error.StackProtectorUnsupportedByTarget => unreachable,
+ error.StackProtectorUnavailableWithoutLibC => unreachable,
- const result = pt.importPkg(c_import_mod) catch |err|
- return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
-
- const path_digest = zcu.filePathDigest(result.file_index);
- pt.updateFile(result.file, path_digest) catch |err|
+ else => |e| return e,
+ };
+ const c_import_file_path: Compilation.Path = try c_import_mod.root.join(gpa, comp.dirs, "cimport.zig");
+ errdefer c_import_file_path.deinit(gpa);
+ const c_import_file = try gpa.create(Zcu.File);
+ errdefer gpa.destroy(c_import_file);
+ const c_import_file_index = try zcu.intern_pool.createFile(gpa, pt.tid, .{
+ .bin_digest = c_import_file_path.digest(),
+ .file = c_import_file,
+ .root_type = .none,
+ });
+ c_import_file.* = .{
+ .status = .never_loaded,
+ .stat = undefined,
+ .is_builtin = false,
+ .path = c_import_file_path,
+ .source = null,
+ .tree = null,
+ .zir = null,
+ .zoir = null,
+ .mod = c_import_mod,
+ .sub_file_path = "cimport.zig",
+ .module_changed = false,
+ .prev_zir = null,
+ .zoir_invalidated = false,
+ };
+ break :file c_import_file_index;
+ };
+ pt.updateFile(new_file_index, zcu.fileByIndex(new_file_index)) catch |err|
return sema.fail(&child_block, src, "C import failed: {s}", .{@errorName(err)});
- try pt.ensureFileAnalyzed(result.file_index);
- const ty = zcu.fileRootType(result.file_index);
+ try pt.ensureFileAnalyzed(new_file_index);
+ const ty = zcu.fileRootType(new_file_index);
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(src, ty);
return Air.internedToRef(ty);
@@ -14097,25 +14114,19 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const operand_src = block.tokenOffset(inst_data.src_tok);
const operand = sema.code.nullTerminatedString(extra.path);
- const result = pt.importFile(block.getFileScope(zcu), operand) catch |err| switch (err) {
- error.ImportOutsideModulePath => {
- return sema.fail(block, operand_src, "import of file outside module path: '{s}'", .{operand});
- },
- error.ModuleNotFound => {
- return sema.fail(block, operand_src, "no module named '{s}' available within module {s}", .{
- operand, block.getFileScope(zcu).mod.fully_qualified_name,
- });
- },
- else => {
- // TODO: these errors are file system errors; make sure an update() will
- // retry this and not cache the file system error, which may be transient.
- return sema.fail(block, operand_src, "unable to open '{s}': {s}", .{ operand, @errorName(err) });
- },
+ const result = pt.doImport(block.getFileScope(zcu), operand) catch |err| switch (err) {
+ error.ModuleNotFound => return sema.fail(block, operand_src, "no module named '{s}' available within module '{s}'", .{
+ operand, block.getFileScope(zcu).mod.?.fully_qualified_name,
+ }),
+ error.IllegalZigImport => unreachable, // caught before semantic analysis
+ error.OutOfMemory => |e| return e,
};
- switch (result.file.getMode()) {
+ const file_index = result.file;
+ const file = zcu.fileByIndex(file_index);
+ switch (file.getMode()) {
.zig => {
- try pt.ensureFileAnalyzed(result.file_index);
- const ty = zcu.fileRootType(result.file_index);
+ try pt.ensureFileAnalyzed(file_index);
+ const ty = zcu.fileRootType(file_index);
try sema.declareDependency(.{ .interned = ty });
try sema.addTypeReferenceEntry(operand_src, ty);
return Air.internedToRef(ty);
@@ -14129,11 +14140,11 @@ fn zirImport(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
break :b res_ty.toIntern();
};
- try sema.declareDependency(.{ .zon_file = result.file_index });
+ try sema.declareDependency(.{ .zon_file = file_index });
const interned = try LowerZon.run(
sema,
- result.file,
- result.file_index,
+ file,
+ file_index,
res_ty,
operand_src,
block,
@@ -17290,10 +17301,10 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const name = name: {
// TODO: we should probably store this name in the ZIR to avoid this complexity.
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu).?;
- const tree = file.getTree(sema.gpa) catch |err| {
+ const tree = file.getTree(zcu) catch |err| {
// In this case we emit a warning + a less precise source location.
- log.warn("unable to load {s}: {s}", .{
- file.sub_file_path, @errorName(err),
+ log.warn("unable to load {}: {s}", .{
+ file.path.fmt(zcu.comp), @errorName(err),
});
break :name null;
};
@@ -17318,10 +17329,10 @@ fn zirClosureGet(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstDat
const msg = msg: {
const name = name: {
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu).?;
- const tree = file.getTree(sema.gpa) catch |err| {
+ const tree = file.getTree(zcu) catch |err| {
// In this case we emit a warning + a less precise source location.
- log.warn("unable to load {s}: {s}", .{
- file.sub_file_path, @errorName(err),
+ log.warn("unable to load {}: {s}", .{
+ file.path.fmt(zcu.comp), @errorName(err),
});
break :name null;
};
@@ -17415,7 +17426,7 @@ fn zirBuiltinSrc(
};
const module_name_val = v: {
- const module_name = file_scope.mod.fully_qualified_name;
+ const module_name = file_scope.mod.?.fully_qualified_name;
const array_ty = try pt.intern(.{ .array_type = .{
.len = module_name.len,
.sentinel = .zero_u8,
src/Zcu.zig
@@ -72,9 +72,9 @@ sema_prog_node: std.Progress.Node = std.Progress.Node.none,
codegen_prog_node: std.Progress.Node = std.Progress.Node.none,
/// Used by AstGen worker to load and store ZIR cache.
-global_zir_cache: Compilation.Directory,
+global_zir_cache: Cache.Directory,
/// Used by AstGen worker to load and store ZIR cache.
-local_zir_cache: Compilation.Directory,
+local_zir_cache: Cache.Directory,
/// This is where all `Export` values are stored. Not all values here are necessarily valid exports;
/// to enumerate all exports, `single_exports` and `multi_exports` must be consulted.
@@ -93,27 +93,72 @@ multi_exports: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
len: u32,
}) = .{},
+/// Key is the digest returned by `Builtin.hash`; value is the corresponding module.
+builtin_modules: std.AutoArrayHashMapUnmanaged(Cache.BinDigest, *Package.Module) = .empty,
+
+/// Populated as soon as the `Compilation` is created. Guaranteed to contain all modules, even builtin ones.
+/// Modules whose root file is not a Zig or ZON file have the value `.none`.
+module_roots: std.AutoArrayHashMapUnmanaged(*Package.Module, File.Index.Optional) = .empty,
+
/// The set of all the Zig source files in the Zig Compilation Unit. Tracked in
/// order to iterate over it and check which source files have been modified on
/// the file system when an update is requested, as well as to cache `@import`
/// results.
///
-/// Keys are fully resolved file paths. This table owns the keys and values.
+/// Always accessed through `ImportTableAdapter`, where keys are fully resolved
+/// file paths in order to ensure files are properly deduplicated. This table owns
+/// the keys and values.
///
/// Protected by Compilation's mutex.
///
/// Not serialized. This state is reconstructed during the first call to
/// `Compilation.update` of the process for a given `Compilation`.
-///
-/// Indexes correspond 1:1 to `files`.
-import_table: std.StringArrayHashMapUnmanaged(File.Index) = .empty,
+import_table: std.ArrayHashMapUnmanaged(
+ File.Index,
+ void,
+ struct {
+ pub const hash = @compileError("all accesses should be through ImportTableAdapter");
+ pub const eql = @compileError("all accesses should be through ImportTableAdapter");
+ },
+ true, // This is necessary! Without it, the map tries to use its Context to rehash. #21918
+) = .empty,
+
+/// The set of all files in `import_table` which are "alive" this update, meaning
+/// they are reachable by traversing imports starting from an analysis root. This
+/// is usually all files in `import_table`, but some could be omitted if an incremental
+/// update removes an import, or if a module specified on the CLI is never imported.
+/// Reconstructed on every update, after AstGen and before Sema.
+/// Value is why the file is alive.
+alive_files: std.AutoArrayHashMapUnmanaged(File.Index, File.Reference) = .empty,
+
+/// If this is populated, a "file exists in multiple modules" error should be emitted.
+/// This causes file errors to not be shown, because we don't really know which files
+/// should be alive (because the user has messed up their imports somewhere!).
+/// Cleared and recomputed every update, after AstGen and before Sema.
+multi_module_err: ?struct {
+ file: File.Index,
+ modules: [2]*Package.Module,
+ refs: [2]File.Reference,
+} = null,
/// The set of all the files which have been loaded with `@embedFile` in the Module.
/// We keep track of this in order to iterate over it and check which files have been
/// modified on the file system when an update is requested, as well as to cache
/// `@embedFile` results.
-/// Keys are fully resolved file paths. This table owns the keys and values.
-embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .empty,
+///
+/// Like `import_table`, this is accessed through `EmbedTableAdapter`, so that it is keyed
+/// on the `Compilation.Path` of the `EmbedFile`.
+///
+/// This table owns all of the `*EmbedFile` memory, which is allocated into gpa.
+embed_table: std.ArrayHashMapUnmanaged(
+ *EmbedFile,
+ void,
+ struct {
+ pub const hash = @compileError("all accesses should be through EmbedTableAdapter");
+ pub const eql = @compileError("all accesses should be through EmbedTableAdapter");
+ },
+ true, // This is necessary! Without it, the map tries to use its Context to rehash. #21918
+) = .empty,
/// Stores all Type and Value objects.
/// The idea is that this will be periodically garbage-collected, but such logic
@@ -147,9 +192,41 @@ compile_logs: std.AutoArrayHashMapUnmanaged(AnalUnit, extern struct {
}) = .empty,
compile_log_lines: std.ArrayListUnmanaged(CompileLogLine) = .empty,
free_compile_log_lines: std.ArrayListUnmanaged(CompileLogLine.Index) = .empty,
-/// Using a map here for consistency with the other fields here.
-/// The ErrorMsg memory is owned by the `File`, using Module's general purpose allocator.
-failed_files: std.AutoArrayHashMapUnmanaged(*File, ?*ErrorMsg) = .empty,
+/// This tracks files which triggered errors when generating AST/ZIR/ZOIR.
+/// If not `null`, the value is a retryable error (the file status is guaranteed
+/// to be `.retryable_failure`). Otherwise, the file status is `.astgen_failure`
+/// or `.success`, and there are ZIR/ZOIR errors which should be printed.
+/// We just store a `[]u8` instead of a full `*ErrorMsg`, because the source
+/// location is always the entire file. The `[]u8` memory is owned by the map
+/// and allocated into `gpa`.
+failed_files: std.AutoArrayHashMapUnmanaged(File.Index, ?[]u8) = .empty,
+/// AstGen is not aware of modules, and so cannot determine whether an import
+/// string makes sense. That is the job of a traversal after AstGen.
+///
+/// There are several ways in which an import can fail:
+///
+/// * It is an import of a file which does not exist. This case is not handled
+/// by this field, but with a `failed_files` entry on the *imported* file.
+/// * It is an import of a module which does not exist in the current module's
+/// dependency table. This happens at `Sema` time, so is not tracked by this
+/// field.
+/// * It is an import which reaches outside of the current module's root
+/// directory. This is tracked by this field.
+/// * It is an import which reaches into an "illegal import directory". Right now,
+/// the only such directory is 'global_cache/b/', but in general, these are
+/// directories the compiler treats specially. This is tracked by this field.
+///
+/// This is a flat array containing all of the relevant errors. It is cleared and
+/// recomputed on every update. The errors here are fatal, i.e. they block any
+/// semantic analysis this update.
+///
+/// Allocated into gpa.
+failed_imports: std.ArrayListUnmanaged(struct {
+ file_index: File.Index,
+ import_string: Zir.NullTerminatedString,
+ import_token: Ast.TokenIndex,
+ kind: enum { file_outside_module_root, illegal_zig_import },
+}) = .empty,
failed_exports: std.AutoArrayHashMapUnmanaged(Export.Index, *ErrorMsg) = .empty,
/// If analysis failed due to a cimport error, the corresponding Clang errors
/// are stored here.
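As a rough sketch of how the new `failed_imports` list above is meant to be filled by the post-AstGen traversal (the traversal itself and the locals `importer_index`, `import_name`, `import_tok` are illustrative; only the field names come from the diff):

    try zcu.failed_imports.append(gpa, .{
        .file_index = importer_index, // file whose ZIR contains the offending import
        .import_string = import_name, // `Zir.NullTerminatedString` of the `@import` operand
        .import_token = import_tok, // token of the `@import` call in the importer's AST
        .kind = .file_outside_module_root,
    });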
@@ -235,6 +312,32 @@ generation: u32 = 0,
pub const PerThread = @import("Zcu/PerThread.zig");
+pub const ImportTableAdapter = struct {
+ zcu: *const Zcu,
+ pub fn hash(ctx: ImportTableAdapter, path: Compilation.Path) u32 {
+ _ = ctx;
+ return @truncate(std.hash.Wyhash.hash(@intFromEnum(path.root), path.sub_path));
+ }
+ pub fn eql(ctx: ImportTableAdapter, a_path: Compilation.Path, b_file: File.Index, b_index: usize) bool {
+ _ = b_index;
+ const b_path = ctx.zcu.fileByIndex(b_file).path;
+ return a_path.root == b_path.root and mem.eql(u8, a_path.sub_path, b_path.sub_path);
+ }
+};
+
+pub const EmbedTableAdapter = struct {
+ pub fn hash(ctx: EmbedTableAdapter, path: Compilation.Path) u32 {
+ _ = ctx;
+ return @truncate(std.hash.Wyhash.hash(@intFromEnum(path.root), path.sub_path));
+ }
+ pub fn eql(ctx: EmbedTableAdapter, a_path: Compilation.Path, b_file: *EmbedFile, b_index: usize) bool {
+ _ = ctx;
+ _ = b_index;
+ const b_path = b_file.path;
+ return a_path.root == b_path.root and mem.eql(u8, a_path.sub_path, b_path.sub_path);
+ }
+};
+
/// Names of declarations in `std.builtin` whose values are memoized in a `BuiltinDecl.Memoized`.
/// The name must exactly match the declaration name, as comptime logic is used to compute the namespace accesses.
/// Parent namespaces must be before their children in this enum. For instance, `.Type` must be before `.@"Type.Fn"`.
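To illustrate how the adapter-keyed tables above are intended to be queried, here is a minimal sketch of a path-keyed lookup into `import_table` through `ImportTableAdapter`, assuming the standard `ArrayHashMapUnmanaged.getOrPutAdapted` API; `zcu`, `gpa`, `file_path`, and `new_file_index` are illustrative locals, not names from the diff:

    const gop = try zcu.import_table.getOrPutAdapted(
        gpa,
        file_path, // a `Compilation.Path`, hashed on `.root` and `.sub_path`
        Zcu.ImportTableAdapter{ .zcu = zcu },
    );
    if (!gop.found_existing) {
        // First time this path is seen: the caller creates the `Zcu.File`, registers
        // it in the InternPool, and stores the resulting index as the key.
        gop.key_ptr.* = new_file_index;
    }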
@@ -732,41 +835,61 @@ pub const Namespace = struct {
};
pub const File = struct {
- /// Relative to the owning package's root source directory.
- /// Memory is stored in gpa, owned by File.
- sub_file_path: []const u8,
-
status: enum {
/// We have not yet attempted to load this file.
/// `stat` is not populated and may be `undefined`.
never_loaded,
/// A filesystem access failed. It should be retried on the next update.
- /// There is a `failed_files` entry containing a non-`null` message.
+ /// There is guaranteed to be a `failed_files` entry with at least one message.
+ /// ZIR/ZOIR errors should not be emitted as `zir`/`zoir` is not up-to-date.
/// `stat` is not populated and may be `undefined`.
retryable_failure,
- /// Parsing/AstGen/ZonGen of this file has failed.
- /// There is an error in `zir` or `zoir`.
- /// There is a `failed_files` entry (with a `null` message).
+ /// This file has failed parsing, AstGen, or ZonGen.
+ /// There is guaranteed to be a `failed_files` entry, which may or may not have messages.
+ /// ZIR/ZOIR errors *should* be emitted as `zir`/`zoir` is up-to-date.
/// `stat` is populated.
astgen_failure,
/// Parsing and AstGen/ZonGen of this file has succeeded.
+ /// There may still be a `failed_files` entry, e.g. for non-fatal AstGen errors.
/// `stat` is populated.
success,
},
/// Whether this is populated depends on `status`.
stat: Cache.File.Stat,
+ /// Whether this file is the generated file of a "builtin" module. This matters because those
+ /// files are generated and stored in-nemory rather than being read off-disk. The rest of the
+ /// pipeline generally shouldn't care about this.
+ is_builtin: bool,
+
+ /// The path of this file. It is important that this path has a "canonical form" because files
+ /// are deduplicated based on path; `Compilation.Path` guarantees this. Owned by this `File`,
+ /// allocated into `gpa`.
+ path: Compilation.Path,
+
source: ?[:0]const u8,
tree: ?Ast,
zir: ?Zir,
zoir: ?Zoir,
/// Module that this file is a part of, managed externally.
- mod: *Package.Module,
- /// Whether this file is a part of multiple packages. This is an error condition which will be reported after AstGen.
- multi_pkg: bool = false,
- /// List of references to this file, used for multi-package errors.
- references: std.ArrayListUnmanaged(File.Reference) = .empty,
+ /// This is initially `null`. After AstGen, a pass is run to determine which module each
+ /// file belongs to, at which point this field is set. It is never set to `null` again;
+ /// this is so that if the file starts belonging to a different module instead, we can
+ /// tell, and invalidate dependencies as needed (see `module_changed`).
+ /// During semantic analysis, this is always non-`null` for alive files (i.e. those which
+ /// have imports targeting them).
+ mod: ?*Package.Module,
+ /// Relative to the root directory of `mod`. If `mod == null`, this field is `undefined`.
+ /// This memory is managed externally and must not be directly freed.
+ /// Its lifetime is at least equal to that of this `File`.
+ sub_file_path: []const u8,
+
+ /// If this file's module identity changes on an incremental update, this flag is set to signal
+ /// to `Zcu.updateZirRefs` that all references to this file must be invalidated. This matters
+ /// because changing your module changes things like your optimization mode and codegen flags,
+ /// so everything needs to be re-done. `updateZirRefs` is responsible for resetting this flag.
+ module_changed: bool,
/// The ZIR for this file from the last update with no file failures. As such, this ZIR is never
/// failed (although it may have compile errors).
@@ -777,7 +900,7 @@ pub const File = struct {
///
/// In other words, if `TrackedInst`s are tied to ZIR other than what's in the `zir` field, this
/// field is populated with that old ZIR.
- prev_zir: ?*Zir = null,
+ prev_zir: ?*Zir,
/// This field serves a similar purpose to `prev_zir`, but for ZOIR. However, since we do not
/// need to map old ZOIR to new ZOIR -- instead only invalidating dependencies if the ZOIR
@@ -785,27 +908,42 @@ pub const File = struct {
///
/// When `zoir` is updated, this field is set to `true`. In `updateZirRefs`, if this is `true`,
/// we invalidate the corresponding `zon_file` dependency, and reset it to `false`.
- zoir_invalidated: bool = false,
+ zoir_invalidated: bool,
+
+ pub const Path = struct {
+ root: enum {
+ cwd,
+ fs_root,
+ local_cache,
+ global_cache,
+ lib_dir,
+ },
+ };
/// A single reference to a file.
pub const Reference = union(enum) {
- /// The file is imported directly (i.e. not as a package) with @import.
+ analysis_root: *Package.Module,
import: struct {
- file: File.Index,
- token: Ast.TokenIndex,
+ importer: Zcu.File.Index,
+ tok: Ast.TokenIndex,
+ /// If the file is imported as the root of a module, this is that module.
+ /// `null` means the file was imported directly by path.
+ module: ?*Package.Module,
},
- /// The file is the root of a module.
- root: *Package.Module,
};
pub fn getMode(self: File) Ast.Mode {
- if (std.mem.endsWith(u8, self.sub_file_path, ".zon")) {
+ // We never create a `File` whose path doesn't give a mode.
+ return modeFromPath(self.path.sub_path).?;
+ }
+
+ pub fn modeFromPath(path: []const u8) ?Ast.Mode {
+ if (std.mem.endsWith(u8, path, ".zon")) {
return .zon;
- } else if (std.mem.endsWith(u8, self.sub_file_path, ".zig")) {
+ } else if (std.mem.endsWith(u8, path, ".zig")) {
return .zig;
} else {
- // `Module.importFile` rejects all other extensions
- unreachable;
+ return null;
}
}
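A small illustrative use of the now-nullable `modeFromPath`, rejecting unsupported extensions up front instead of relying on the old `unreachable` (the wrapper function and error name are made up for this sketch):

    fn importModeOrFail(import_path: []const u8) !Ast.Mode {
        // Only ".zig" and ".zon" paths map to a mode.
        return Zcu.File.modeFromPath(import_path) orelse error.UnsupportedImportExtension;
    }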
@@ -842,15 +980,18 @@ pub const File = struct {
stat: Cache.File.Stat,
};
- pub fn getSource(file: *File, gpa: Allocator) !Source {
+ pub fn getSource(file: *File, zcu: *const Zcu) !Source {
+ const gpa = zcu.gpa;
+
if (file.source) |source| return .{
.bytes = source,
.stat = file.stat,
};
- // Keep track of inode, file size, mtime, hash so we can detect which files
- // have been modified when an incremental update is requested.
- var f = try file.mod.root.openFile(file.sub_file_path, .{});
+ var f = f: {
+ const dir, const sub_path = file.path.openInfo(zcu.comp.dirs);
+ break :f try dir.openFile(sub_path, .{});
+ };
defer f.close();
const stat = try f.stat();
@@ -882,28 +1023,14 @@ pub const File = struct {
};
}
- pub fn getTree(file: *File, gpa: Allocator) !*const Ast {
+ pub fn getTree(file: *File, zcu: *const Zcu) !*const Ast {
if (file.tree) |*tree| return tree;
- const source = try file.getSource(gpa);
- file.tree = try .parse(gpa, source.bytes, file.getMode());
+ const source = try file.getSource(zcu);
+ file.tree = try .parse(zcu.gpa, source.bytes, file.getMode());
return &file.tree.?;
}
- pub fn getZoir(file: *File, zcu: *Zcu) !*const Zoir {
- if (file.zoir) |*zoir| return zoir;
-
- const tree = file.tree.?;
- assert(tree.mode == .zon);
-
- file.zoir = try ZonGen.generate(zcu.gpa, tree, .{});
- if (file.zoir.?.hasCompileErrors()) {
- try zcu.failed_files.putNoClobber(zcu.gpa, file, null);
- return error.AnalysisFail;
- }
- return &file.zoir.?;
- }
-
pub fn fullyQualifiedNameLen(file: File) usize {
const ext = std.fs.path.extension(file.sub_file_path);
return file.sub_file_path.len - ext.len;
@@ -937,85 +1064,49 @@ pub const File = struct {
return ip.getOrPutTrailingString(gpa, pt.tid, @intCast(slice[0].len), .no_embedded_nulls);
}
- pub fn fullPath(file: File, ally: Allocator) ![]u8 {
- return file.mod.root.joinString(ally, file.sub_file_path);
- }
-
- pub fn dumpSrc(file: *File, src: LazySrcLoc) void {
- const loc = std.zig.findLineColumn(file.source.bytes, src);
- std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 });
- }
-
- /// Add a reference to this file during AstGen.
- pub fn addReference(file: *File, zcu: *Zcu, ref: File.Reference) !void {
- // Don't add the same module root twice. Note that since we always add module roots at the
- // front of the references array (see below), this loop is actually O(1) on valid code.
- if (ref == .root) {
- for (file.references.items) |other| {
- switch (other) {
- .root => |r| if (ref.root == r) return,
- else => break, // reached the end of the "is-root" references
- }
- }
- }
-
- switch (ref) {
- // We put root references at the front of the list both to make the above loop fast and
- // to make multi-module errors more helpful (since "root-of" notes are generally more
- // informative than "imported-from" notes). This path is hit very rarely, so the speed
- // of the insert operation doesn't matter too much.
- .root => try file.references.insert(zcu.gpa, 0, ref),
-
- // Other references we'll just put at the end.
- else => try file.references.append(zcu.gpa, ref),
- }
+ pub const Index = InternPool.FileIndex;
- const mod = switch (ref) {
- .import => |import| zcu.fileByIndex(import.file).mod,
- .root => |mod| mod,
- };
- if (mod != file.mod) file.multi_pkg = true;
+ pub fn errorBundleWholeFileSrc(
+ file: *File,
+ zcu: *const Zcu,
+ eb: *std.zig.ErrorBundle.Wip,
+ ) !std.zig.ErrorBundle.SourceLocationIndex {
+ return eb.addSourceLocation(.{
+ .src_path = try eb.printString("{}", .{file.path.fmt(zcu.comp)}),
+ .span_start = 0,
+ .span_main = 0,
+ .span_end = 0,
+ .line = 0,
+ .column = 0,
+ .source_line = 0,
+ });
}
-
- /// Mark this file and every file referenced by it as multi_pkg and report an
- /// astgen_failure error for them. AstGen must have completed in its entirety.
- pub fn recursiveMarkMultiPkg(file: *File, pt: Zcu.PerThread) void {
- file.multi_pkg = true;
- file.status = .astgen_failure;
-
- // We can only mark children as failed if the ZIR is loaded, which may not
- // be the case if there were other astgen failures in this file
- if (file.zir == null) return;
-
- const imports_index = file.zir.?.extra[@intFromEnum(Zir.ExtraIndex.imports)];
- if (imports_index == 0) return;
- const extra = file.zir.?.extraData(Zir.Inst.Imports, imports_index);
-
- var extra_index = extra.end;
- for (0..extra.data.imports_len) |_| {
- const item = file.zir.?.extraData(Zir.Inst.Imports.Item, extra_index);
- extra_index = item.end;
-
- const import_path = file.zir.?.nullTerminatedString(item.data.name);
- if (mem.eql(u8, import_path, "builtin")) continue;
-
- const res = pt.importFile(file, import_path) catch continue;
- if (!res.is_pkg and !res.file.multi_pkg) {
- res.file.recursiveMarkMultiPkg(pt);
- }
- }
+ pub fn errorBundleTokenSrc(
+ file: *File,
+ tok: Ast.TokenIndex,
+ zcu: *const Zcu,
+ eb: *std.zig.ErrorBundle.Wip,
+ ) !std.zig.ErrorBundle.SourceLocationIndex {
+ const source = try file.getSource(zcu);
+ const tree = try file.getTree(zcu);
+ const start = tree.tokenStart(tok);
+ const end = start + tree.tokenSlice(tok).len;
+ const loc = std.zig.findLineColumn(source.bytes, start);
+ return eb.addSourceLocation(.{
+ .src_path = try eb.printString("{}", .{file.path.fmt(zcu.comp)}),
+ .span_start = start,
+ .span_main = start,
+ .span_end = @intCast(end),
+ .line = @intCast(loc.line),
+ .column = @intCast(loc.column),
+ .source_line = try eb.addString(loc.source_line),
+ });
}
-
- pub const Index = InternPool.FileIndex;
};
/// Represents the contents of a file loaded with `@embedFile`.
pub const EmbedFile = struct {
- /// Module that this file is a part of, managed externally.
- owner: *Package.Module,
- /// Relative to the owning module's root directory.
- sub_file_path: InternPool.NullTerminatedString,
-
+ path: Compilation.Path,
/// `.none` means the file was not loaded, so `stat` is undefined.
val: InternPool.Index,
/// If this is `null` and `val` is `.none`, the file has never been loaded.
@@ -1025,7 +1116,7 @@ pub const EmbedFile = struct {
pub const Index = enum(u32) {
_,
pub fn get(idx: Index, zcu: *const Zcu) *EmbedFile {
- return zcu.embed_table.values()[@intFromEnum(idx)];
+ return zcu.embed_table.keys()[@intFromEnum(idx)];
}
};
};
@@ -1103,32 +1194,31 @@ pub const SrcLoc = struct {
pub const Span = Ast.Span;
- pub fn span(src_loc: SrcLoc, gpa: Allocator) !Span {
+ pub fn span(src_loc: SrcLoc, zcu: *const Zcu) !Span {
switch (src_loc.lazy) {
.unneeded => unreachable,
- .entire_file => return Span{ .start = 0, .end = 1, .main = 0 },
.byte_abs => |byte_index| return Span{ .start = byte_index, .end = byte_index + 1, .main = byte_index },
.token_abs => |tok_index| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.node_abs => |node| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
return tree.nodeToSpan(node);
},
.byte_offset => |byte_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const tok_index = src_loc.baseSrcToken();
const start = tree.tokenStart(tok_index) + byte_off;
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
return Span{ .start = start, .end = end, .main = start };
},
.token_offset => |tok_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const tok_index = tok_off.toAbsolute(src_loc.baseSrcToken());
const start = tree.tokenStart(tok_index);
const end = start + @as(u32, @intCast(tree.tokenSlice(tok_index).len));
@@ -1136,23 +1226,23 @@ pub const SrcLoc = struct {
},
.node_offset => |traced_off| {
const node_off = traced_off.x;
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(node);
},
.node_offset_main_token => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const main_token = tree.nodeMainToken(node);
return tree.tokensToSpan(main_token, main_token, main_token);
},
.node_offset_bin_op => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(node);
},
.node_offset_initializer => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.tokensToSpan(
tree.firstToken(node) - 3,
@@ -1161,7 +1251,7 @@ pub const SrcLoc = struct {
);
},
.node_offset_var_decl_ty => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = switch (tree.nodeTag(node)) {
.global_var_decl,
@@ -1183,7 +1273,7 @@ pub const SrcLoc = struct {
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_var_decl_align => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const align_node = if (tree.fullVarDecl(node)) |v|
@@ -1195,7 +1285,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(align_node);
},
.node_offset_var_decl_section => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const section_node = if (tree.fullVarDecl(node)) |v|
@@ -1207,7 +1297,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(section_node);
},
.node_offset_var_decl_addrspace => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const addrspace_node = if (tree.fullVarDecl(node)) |v|
@@ -1219,7 +1309,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(addrspace_node);
},
.node_offset_var_decl_init => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const init_node = switch (tree.nodeTag(node)) {
.global_var_decl,
@@ -1233,14 +1323,14 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(init_node);
},
.node_offset_builtin_call_arg => |builtin_arg| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = builtin_arg.builtin_call_node.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
const params = tree.builtinCallParams(&buf, node).?;
return tree.nodeToSpan(params[builtin_arg.arg_index]);
},
.node_offset_ptrcast_operand => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
var node = node_off.toAbsolute(src_loc.base_node);
while (true) {
@@ -1273,7 +1363,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(node);
},
.node_offset_array_access_index => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
},
@@ -1282,7 +1372,7 @@ pub const SrcLoc = struct {
.node_offset_slice_end,
.node_offset_slice_sentinel,
=> |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullSlice(node).?;
const part_node = switch (src_loc.lazy) {
@@ -1295,14 +1385,14 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(part_node);
},
.node_offset_call_func => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullCall(&buf, node).?;
return tree.nodeToSpan(full.ast.fn_expr);
},
.node_offset_field_name => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const tok_index = switch (tree.nodeTag(node)) {
@@ -1326,7 +1416,7 @@ pub const SrcLoc = struct {
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_field_name_init => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const tok_index = tree.firstToken(node) - 2;
const start = tree.tokenStart(tok_index);
@@ -1334,18 +1424,18 @@ pub const SrcLoc = struct {
return Span{ .start = start, .end = end, .main = start };
},
.node_offset_deref_ptr => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(node);
},
.node_offset_asm_source => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
return tree.nodeToSpan(full.ast.template);
},
.node_offset_asm_ret_ty => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullAsm(node).?;
const asm_output = full.outputs[0];
@@ -1353,7 +1443,7 @@ pub const SrcLoc = struct {
},
.node_offset_if_cond => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const src_node = switch (tree.nodeTag(node)) {
.if_simple,
@@ -1381,14 +1471,14 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(src_node);
},
.for_input => |for_input| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = for_input.for_node_offset.toAbsolute(src_loc.base_node);
const for_full = tree.fullFor(node).?;
const src_node = for_full.ast.inputs[for_input.input_index];
return tree.nodeToSpan(src_node);
},
.for_capture_from_input => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const input_node = node_off.toAbsolute(src_loc.base_node);
// We have to actually linear scan the whole AST to find the for loop
// that contains this input.
@@ -1429,7 +1519,7 @@ pub const SrcLoc = struct {
} else unreachable;
},
.call_arg => |call_arg| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = call_arg.call_node_offset.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
const call_full = tree.fullCall(buf[0..1], node) orelse {
@@ -1466,7 +1556,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(call_full.ast.params[call_arg.arg_index]);
},
.fn_proto_param, .fn_proto_param_type => |fn_proto_param| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = fn_proto_param.fn_proto_node_offset.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
@@ -1494,17 +1584,17 @@ pub const SrcLoc = struct {
unreachable;
},
.node_offset_bin_lhs => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node_and_node[0]);
},
.node_offset_bin_rhs => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node_and_node[1]);
},
.array_cat_lhs, .array_cat_rhs => |cat| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = cat.array_cat_offset.toAbsolute(src_loc.base_node);
const arr_node = if (src_loc.lazy == .array_cat_lhs)
tree.nodeData(node).node_and_node[0]
@@ -1530,20 +1620,20 @@ pub const SrcLoc = struct {
},
.node_offset_try_operand => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node);
},
.node_offset_switch_operand => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
const condition, _ = tree.nodeData(node).node_and_extra;
return tree.nodeToSpan(condition);
},
.node_offset_switch_special_prong => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = node_off.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
@@ -1560,7 +1650,7 @@ pub const SrcLoc = struct {
},
.node_offset_switch_range => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = node_off.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
@@ -1580,28 +1670,28 @@ pub const SrcLoc = struct {
} else unreachable;
},
.node_offset_fn_type_align => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.align_expr.unwrap().?);
},
.node_offset_fn_type_addrspace => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.addrspace_expr.unwrap().?);
},
.node_offset_fn_type_section => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.section_expr.unwrap().?);
},
.node_offset_fn_type_cc => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
@@ -1609,14 +1699,14 @@ pub const SrcLoc = struct {
},
.node_offset_fn_type_ret_ty => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, node).?;
return tree.nodeToSpan(full.ast.return_type.unwrap().?);
},
.node_offset_param => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
var first_tok = tree.firstToken(node);
@@ -1631,7 +1721,7 @@ pub const SrcLoc = struct {
);
},
.token_offset_param => |token_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const main_token = tree.nodeMainToken(src_loc.base_node);
const tok_index = token_off.toAbsolute(main_token);
@@ -1648,14 +1738,14 @@ pub const SrcLoc = struct {
},
.node_offset_anyframe_type => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
_, const child_type = tree.nodeData(parent_node).token_and_node;
return tree.nodeToSpan(child_type);
},
.node_offset_lib_name => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, parent_node).?;
@@ -1666,75 +1756,75 @@ pub const SrcLoc = struct {
},
.node_offset_array_type_len => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullArrayType(parent_node).?;
return tree.nodeToSpan(full.ast.elem_count);
},
.node_offset_array_type_sentinel => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullArrayType(parent_node).?;
return tree.nodeToSpan(full.ast.sentinel.unwrap().?);
},
.node_offset_array_type_elem => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullArrayType(parent_node).?;
return tree.nodeToSpan(full.ast.elem_type);
},
.node_offset_un_op => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
return tree.nodeToSpan(tree.nodeData(node).node);
},
.node_offset_ptr_elem => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.child_type);
},
.node_offset_ptr_sentinel => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.sentinel.unwrap().?);
},
.node_offset_ptr_align => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.align_node.unwrap().?);
},
.node_offset_ptr_addrspace => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.addrspace_node.unwrap().?);
},
.node_offset_ptr_bitoffset => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.bit_range_start.unwrap().?);
},
.node_offset_ptr_hostsize => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full = tree.fullPtrType(parent_node).?;
return tree.nodeToSpan(full.ast.bit_range_end.unwrap().?);
},
.node_offset_container_tag => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
switch (tree.nodeTag(parent_node)) {
@@ -1757,7 +1847,7 @@ pub const SrcLoc = struct {
}
},
.node_offset_field_default => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
const full: Ast.full.ContainerField = switch (tree.nodeTag(parent_node)) {
@@ -1768,7 +1858,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(full.ast.value_expr.unwrap().?);
},
.node_offset_init_ty => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const parent_node = node_off.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
@@ -1779,7 +1869,7 @@ pub const SrcLoc = struct {
return tree.nodeToSpan(type_expr);
},
.node_offset_store_ptr => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
switch (tree.nodeTag(node)) {
@@ -1806,7 +1896,7 @@ pub const SrcLoc = struct {
}
},
.node_offset_store_operand => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
switch (tree.nodeTag(node)) {
@@ -1833,7 +1923,7 @@ pub const SrcLoc = struct {
}
},
.node_offset_return_operand => |node_off| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = node_off.toAbsolute(src_loc.base_node);
if (tree.nodeTag(node) == .@"return") {
if (tree.nodeData(node).opt_node.unwrap()) |lhs| {
@@ -1847,7 +1937,7 @@ pub const SrcLoc = struct {
.container_field_type,
.container_field_align,
=> |field_idx| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse
@@ -1875,7 +1965,7 @@ pub const SrcLoc = struct {
} else unreachable;
},
.tuple_field_type, .tuple_field_init => |field_info| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = src_loc.base_node;
var buf: [2]Ast.Node.Index = undefined;
const container_decl = tree.fullContainerDecl(&buf, node) orelse
@@ -1889,7 +1979,7 @@ pub const SrcLoc = struct {
});
},
.init_elem => |init_elem| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const init_node = init_elem.init_node_offset.toAbsolute(src_loc.base_node);
var buf: [2]Ast.Node.Index = undefined;
if (tree.fullArrayInit(&buf, init_node)) |full| {
@@ -1928,7 +2018,7 @@ pub const SrcLoc = struct {
.init_field_dll_import => "dll_import",
else => unreachable,
};
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const node = builtin_call_node.toAbsolute(src_loc.base_node);
var builtin_buf: [2]Ast.Node.Index = undefined;
const args = tree.builtinCallParams(&builtin_buf, node).?;
@@ -1967,7 +2057,7 @@ pub const SrcLoc = struct {
else => unreachable,
};
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
const switch_node = switch_node_offset.toAbsolute(src_loc.base_node);
_, const extra_index = tree.nodeData(switch_node).node_and_extra;
const case_nodes = tree.extraDataSlice(tree.extraData(extra_index, Ast.Node.SubRange), Ast.Node.Index);
@@ -2062,7 +2152,7 @@ pub const SrcLoc = struct {
}
},
.func_decl_param_comptime => |param_idx| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, src_loc.base_node).?;
var param_it = full.iterate(tree);
@@ -2071,7 +2161,7 @@ pub const SrcLoc = struct {
return tree.tokenToSpan(param.comptime_noalias.?);
},
.func_decl_param_ty => |param_idx| {
- const tree = try src_loc.file_scope.getTree(gpa);
+ const tree = try src_loc.file_scope.getTree(zcu);
var buf: [1]Ast.Node.Index = undefined;
const full = tree.fullFnProto(&buf, src_loc.base_node).?;
var param_it = full.iterate(tree);
@@ -2100,9 +2190,6 @@ pub const LazySrcLoc = struct {
/// value is being set to this tag.
/// `base_node_inst` is unused.
unneeded,
- /// Means the source location points to an entire file; not any particular
- /// location within the file. `file_scope` union field will be active.
- entire_file,
/// The source location points to a byte offset within a source file,
/// offset from 0. The source file is determined contextually.
byte_abs: u32,
@@ -2521,10 +2608,7 @@ pub const LazySrcLoc = struct {
/// Like `upgrade`, but returns `null` if the source location has been lost across incremental updates.
pub fn upgradeOrLost(lazy: LazySrcLoc, zcu: *Zcu) ?SrcLoc {
- const file, const base_node: Ast.Node.Index = if (lazy.offset == .entire_file) .{
- zcu.fileByIndex(lazy.base_node_inst.resolveFile(&zcu.intern_pool)),
- .root,
- } else resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
+ const file, const base_node: Ast.Node.Index = resolveBaseNode(lazy.base_node_inst, zcu) orelse return null;
return .{
.file_scope = file,
.base_node = base_node,
@@ -2544,15 +2628,16 @@ pub const LazySrcLoc = struct {
return true;
};
if (lhs_src.file_scope != rhs_src.file_scope) {
- return std.mem.order(
- u8,
- lhs_src.file_scope.sub_file_path,
- rhs_src.file_scope.sub_file_path,
- ).compare(.lt);
+ const lhs_path = lhs_src.file_scope.path;
+ const rhs_path = rhs_src.file_scope.path;
+ if (lhs_path.root != rhs_path.root) {
+ return @intFromEnum(lhs_path.root) < @intFromEnum(rhs_path.root);
+ }
+ return std.mem.order(u8, lhs_path.sub_path, rhs_path.sub_path).compare(.lt);
}
- const lhs_span = try lhs_src.span(zcu.gpa);
- const rhs_span = try rhs_src.span(zcu.gpa);
+ const lhs_span = try lhs_src.span(zcu);
+ const rhs_span = try rhs_src.span(zcu);
return lhs_span.main < rhs_span.main;
}
};
@@ -2583,16 +2668,16 @@ pub fn deinit(zcu: *Zcu) void {
if (zcu.llvm_object) |llvm_object| llvm_object.deinit();
- for (zcu.import_table.keys()) |key| {
- gpa.free(key);
- }
- for (zcu.import_table.values()) |file_index| {
+ zcu.builtin_modules.deinit(gpa);
+ zcu.module_roots.deinit(gpa);
+ for (zcu.import_table.keys()) |file_index| {
pt.destroyFile(file_index);
}
zcu.import_table.deinit(gpa);
+ zcu.alive_files.deinit(gpa);
- for (zcu.embed_table.keys(), zcu.embed_table.values()) |path, embed_file| {
- gpa.free(path);
+ for (zcu.embed_table.keys()) |embed_file| {
+ embed_file.path.deinit(gpa);
gpa.destroy(embed_file);
}
zcu.embed_table.deinit(gpa);
@@ -2610,9 +2695,10 @@ pub fn deinit(zcu: *Zcu) void {
zcu.failed_types.deinit(gpa);
for (zcu.failed_files.values()) |value| {
- if (value) |msg| msg.destroy(gpa);
+ if (value) |msg| gpa.free(msg);
}
zcu.failed_files.deinit(gpa);
+ zcu.failed_imports.deinit(gpa);
for (zcu.failed_exports.values()) |value| {
value.destroy(gpa);
@@ -3404,27 +3490,21 @@ pub fn ensureNavValAnalysisQueued(zcu: *Zcu, nav_id: InternPool.Nav.Index) !void
zcu.nav_val_analysis_queued.putAssumeCapacityNoClobber(nav_id, {});
}
-pub const ImportFileResult = struct {
- file: *File,
- file_index: File.Index,
+pub const ImportResult = struct {
+ /// Whether `file` has been newly created; in other words, whether this is the first import of
+ /// this file. This should only be `true` when importing files during AstGen. After that, all
+ /// files should have already been discovered.
is_new: bool,
- is_pkg: bool,
-};
-pub fn computePathDigest(zcu: *Zcu, mod: *Package.Module, sub_file_path: []const u8) Cache.BinDigest {
- const want_local_cache = mod == zcu.main_mod;
- var path_hash: Cache.HashHelper = .{};
- path_hash.addBytes(build_options.version);
- path_hash.add(builtin.zig_backend);
- if (!want_local_cache) {
- path_hash.addOptionalBytes(mod.root.root_dir.path);
- path_hash.addBytes(mod.root.sub_path);
- }
- path_hash.addBytes(sub_file_path);
- var bin: Cache.BinDigest = undefined;
- path_hash.hasher.final(&bin);
- return bin;
-}
+ /// `file.mod` is not populated by this function, so if `is_new` is `true`, `file.mod` is `undefined`.
+ file: *Zcu.File,
+ file_index: File.Index,
+
+ /// If this import was a simple file path, this is `null`; the imported file should exist within
+ /// the importer's module. Otherwise, it's the module which the import resolved to. This module
+ /// could match the module of `cur_file`, since a module can depend on itself.
+ module: ?*Package.Module,
+};
/// Delete all the Export objects that are caused by this `AnalUnit`. Re-analysis of
/// this `AnalUnit` will cause them to be re-created (or not).
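A condensed sketch of consuming an `ImportResult`, mirroring the `zirImport` hunk earlier in this commit; error handling is trimmed and `pt`, `importer_file`, and `import_string` are illustrative locals:

    const result = pt.doImport(importer_file, import_string) catch |err| switch (err) {
        error.ModuleNotFound => return error.AnalysisFail, // surfaced via `sema.fail` in the real code
        error.IllegalZigImport => unreachable, // rejected before semantic analysis
        error.OutOfMemory => |e| return e,
    };
    const file = zcu.fileByIndex(result.file_index);
    switch (file.getMode()) {
        .zig => try pt.ensureFileAnalyzed(result.file_index),
        .zon => {}, // handed to LowerZon with a result type, as in `zirImport`
    }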
@@ -3938,15 +4018,7 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
for (zcu.analysis_roots.slice()) |mod| {
- // Logic ripped from `Zcu.PerThread.importPkg`.
- // TODO: this is silly, `Module` should just store a reference to its root `File`.
- const resolved_path = try std.fs.path.resolve(gpa, &.{
- mod.root.root_dir.path orelse ".",
- mod.root.sub_path,
- mod.root_src_path,
- });
- defer gpa.free(resolved_path);
- const file = zcu.import_table.get(resolved_path).?;
+ const file = zcu.module_roots.get(mod).?.unwrap() orelse continue;
const root_ty = zcu.fileRootType(file);
if (root_ty == .none) continue;
type_queue.putAssumeCapacityNoClobber(root_ty, null);
@@ -4226,8 +4298,8 @@ fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []co
.@"comptime" => |cu_id| {
const cu = ip.getComptimeUnit(cu_id);
if (cu.zir_index.resolveFull(ip)) |resolved| {
- const file_path = zcu.fileByIndex(resolved.file).sub_file_path;
- return writer.print("comptime(inst=('{s}', %{}) [{}])", .{ file_path, @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
+ const file_path = zcu.fileByIndex(resolved.file).path;
+ return writer.print("comptime(inst=('{}', %{}) [{}])", .{ file_path.fmt(zcu.comp), @intFromEnum(resolved.inst), @intFromEnum(cu_id) });
} else {
return writer.print("comptime(inst=<lost> [{}])", .{@intFromEnum(cu_id)});
}
@@ -4251,8 +4323,8 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("inst(<lost>)");
};
- const file_path = zcu.fileByIndex(info.file).sub_file_path;
- return writer.print("inst('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
+ const file_path = zcu.fileByIndex(info.file).path;
+ return writer.print("inst('{}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.nav_val => |nav| {
const fqn = ip.getNav(nav).fqn;
@@ -4268,30 +4340,26 @@ fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, com
else => unreachable,
},
.zon_file => |file| {
- const file_path = zcu.fileByIndex(file).sub_file_path;
- return writer.print("zon_file('{s}')", .{file_path});
+ const file_path = zcu.fileByIndex(file).path;
+ return writer.print("zon_file('{}')", .{file_path.fmt(zcu.comp)});
},
.embed_file => |ef_idx| {
const ef = ef_idx.get(zcu);
- return writer.print("embed_file('{s}')", .{std.fs.path.fmtJoin(&.{
- ef.owner.root.root_dir.path orelse "",
- ef.owner.root.sub_path,
- ef.sub_file_path.toSlice(ip),
- })});
+ return writer.print("embed_file('{}')", .{ef.path.fmt(zcu.comp)});
},
.namespace => |ti| {
const info = ti.resolveFull(ip) orelse {
return writer.writeAll("namespace(<lost>)");
};
- const file_path = zcu.fileByIndex(info.file).sub_file_path;
- return writer.print("namespace('{s}', %{d})", .{ file_path, @intFromEnum(info.inst) });
+ const file_path = zcu.fileByIndex(info.file).path;
+ return writer.print("namespace('{}', %{d})", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst) });
},
.namespace_name => |k| {
const info = k.namespace.resolveFull(ip) orelse {
return writer.print("namespace(<lost>, '{}')", .{k.name.fmt(ip)});
};
- const file_path = zcu.fileByIndex(info.file).sub_file_path;
- return writer.print("namespace('{s}', %{d}, '{}')", .{ file_path, @intFromEnum(info.inst), k.name.fmt(ip) });
+ const file_path = zcu.fileByIndex(info.file).path;
+ return writer.print("namespace('{}', %{d}, '{}')", .{ file_path.fmt(zcu.comp), @intFromEnum(info.inst), k.name.fmt(ip) });
},
.memoized_state => return writer.writeAll("memoized_state"),
}
@@ -4508,3 +4576,114 @@ pub fn codegenFailTypeMsg(zcu: *Zcu, ty_index: InternPool.Index, msg: *ErrorMsg)
zcu.failed_types.putAssumeCapacityNoClobber(ty_index, msg);
return error.CodegenFail;
}
+
+/// Asserts that `zcu.multi_module_err != null`.
+pub fn addFileInMultipleModulesError(
+ zcu: *Zcu,
+ eb: *std.zig.ErrorBundle.Wip,
+) !void {
+ const gpa = zcu.gpa;
+
+ const info = zcu.multi_module_err.?;
+ const file = info.file;
+
+ // error: file exists in modules 'root.foo' and 'root.bar'
+ // note: files must belong to only one module
+ // note: file is imported here
+ // note: which is imported here
+ // note: which is the root of module 'root.foo' imported here
+ // note: file is the root of module 'root.bar' imported here
+
+ const file_src = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb);
+ const root_msg = try eb.printString("file exists in modules '{s}' and '{s}'", .{
+ info.modules[0].fully_qualified_name,
+ info.modules[1].fully_qualified_name,
+ });
+
+ var notes: std.ArrayListUnmanaged(std.zig.ErrorBundle.MessageIndex) = .empty;
+ defer notes.deinit(gpa);
+
+ try notes.append(gpa, try eb.addErrorMessage(.{
+ .msg = try eb.addString("files must belong to only one module"),
+ .src_loc = file_src,
+ }));
+
+ try zcu.explainWhyFileIsInModule(eb, ¬es, file, info.modules[0], info.refs[0]);
+ try zcu.explainWhyFileIsInModule(eb, ¬es, file, info.modules[1], info.refs[1]);
+
+ try eb.addRootErrorMessage(.{
+ .msg = root_msg,
+ .src_loc = file_src,
+ .notes_len = @intCast(notes.items.len),
+ });
+ const notes_start = try eb.reserveNotes(@intCast(notes.items.len));
+ const notes_slice: []std.zig.ErrorBundle.MessageIndex = @ptrCast(eb.extra.items[notes_start..]);
+ @memcpy(notes_slice, notes.items);
+}
+
+fn explainWhyFileIsInModule(
+ zcu: *Zcu,
+ eb: *std.zig.ErrorBundle.Wip,
+ notes_out: *std.ArrayListUnmanaged(std.zig.ErrorBundle.MessageIndex),
+ file: File.Index,
+ in_module: *Package.Module,
+ ref: File.Reference,
+) !void {
+ const gpa = zcu.gpa;
+
+    // note: file is the root of module 'foo'
+    //
+    // note: file is imported here by the root of module 'foo'
+    //
+    // note: file is imported here
+    // note: which is imported here
+    // note: which is imported here by the root of module 'foo'
+
+ var import = switch (ref) {
+ .analysis_root => |mod| {
+ assert(mod == in_module);
+ try notes_out.append(gpa, try eb.addErrorMessage(.{
+ .msg = try eb.printString("file is the root of module '{s}'", .{mod.fully_qualified_name}),
+ .src_loc = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb),
+ }));
+ return;
+ },
+ .import => |import| if (import.module) |mod| {
+ assert(mod == in_module);
+ try notes_out.append(gpa, try eb.addErrorMessage(.{
+ .msg = try eb.printString("file is the root of module '{s}'", .{mod.fully_qualified_name}),
+ .src_loc = try zcu.fileByIndex(file).errorBundleWholeFileSrc(zcu, eb),
+ }));
+ return;
+ } else import,
+ };
+
+ var is_first = true;
+ while (true) {
+ const thing: []const u8 = if (is_first) "file" else "which";
+ is_first = false;
+
+ const import_src = try zcu.fileByIndex(import.importer).errorBundleTokenSrc(import.tok, zcu, eb);
+
+ const importer_ref = zcu.alive_files.get(import.importer).?;
+ const importer_root: ?*Package.Module = switch (importer_ref) {
+ .analysis_root => |mod| mod,
+ .import => |i| i.module,
+ };
+
+ if (importer_root) |m| {
+ try notes_out.append(gpa, try eb.addErrorMessage(.{
+ .msg = try eb.printString("{s} is imported here by the root of module '{s}'", .{ thing, m.fully_qualified_name }),
+ .src_loc = import_src,
+ }));
+ return;
+ }
+
+ try notes_out.append(gpa, try eb.addErrorMessage(.{
+ .msg = try eb.printString("{s} is imported here", .{thing}),
+ .src_loc = import_src,
+ }));
+
+ import = importer_ref.import;
+ }
+}
test/cases/compile_errors/bad_import.zig
@@ -3,7 +3,6 @@ const bogus = @import(
);
// error
-// backend=stage2
-// target=native
//
-// bogus-does-not-exist.zig': FileNotFound
+// bogus-does-not-exist.zig:1:1: error: unable to load 'bogus-does-not-exist.zig': FileNotFound
+// :2:5: note: file imported here
test/cases/compile_errors/import_of_missing_module.zig
@@ -0,0 +1,8 @@
+const foo = @import("foo");
+comptime {
+ _ = foo;
+}
+
+// error
+//
+// :1:21: error: no module named 'foo' available within module 'root'
test/cases/compile_errors/import_of_missing_package.zig
@@ -1,10 +0,0 @@
-const foo = @import("foo");
-comptime {
- _ = foo;
-}
-
-// error
-// backend=stage2
-// target=native
-//
-// :1:21: error: no module named 'foo' available within module root
test/cases/compile_errors/import_outside_module_path.zig
@@ -0,0 +1,7 @@
+comptime {
+ _ = @import("../a.zig");
+}
+
+// error
+//
+// :2:17: error: import of file outside module path
test/cases/compile_errors/import_outside_package.zig
@@ -1,8 +0,0 @@
-export fn a() usize {
- return @import("../../above.zig").len;
-}
-
-// error
-// target=native
-//
-// :2:20: error: import of file outside module path: '../../above.zig'
test/cases/compile_errors/import_outside_package_path.zig
@@ -1,9 +0,0 @@
-comptime {
- _ = @import("../a.zig");
-}
-
-// error
-// backend=stage2
-// target=native
-//
-// :2:17: error: import of file outside module path: '../a.zig'
test/incremental/bad_import
@@ -0,0 +1,35 @@
+#target=x86_64-linux-selfhosted
+#target=x86_64-linux-cbe
+#target=x86_64-windows-cbe
+#target=wasm32-wasi-selfhosted
+
+#update=initial version
+#file=main.zig
+pub fn main() !void {
+ _ = @import("foo.zig");
+ try std.io.getStdOut().writeAll("success\n");
+}
+const std = @import("std");
+#file=foo.zig
+comptime {
+ _ = @import("bad.zig");
+}
+#expect_error=bad.zig:1:1: error: unable to load 'bad.zig': FileNotFound
+#expect_error=foo.zig:2:17: note: file imported here
+
+#update=change bad import
+#file=foo.zig
+comptime {
+ _ = @import("this_is/not_real.zig");
+}
+#expect_error=this_is/not_real.zig:1:1: error: unable to load 'this_is/not_real.zig': FileNotFound
+#expect_error=foo.zig:2:17: note: file imported here
+
+#update=remove import of 'foo.zig'
+#file=main.zig
+pub fn main() !void {
+ //_ = @import("foo.zig");
+ try std.io.getStdOut().writeAll("success\n");
+}
+const std = @import("std");
+#expect_stdout="success\n"
test/incremental/change_module
@@ -0,0 +1,65 @@
+#target=x86_64-linux-selfhosted
+#target=x86_64-linux-cbe
+#target=x86_64-windows-cbe
+#target=wasm32-wasi-selfhosted
+#module=foo=foo.zig
+
+#update=initial version
+#file=main.zig
+pub fn main() void {
+ _ = @import("foo");
+ //_ = @import("other.zig");
+}
+#file=foo.zig
+comptime {
+ _ = @import("other.zig");
+}
+#file=other.zig
+fn f() void {
+ @compileLog(@src().module);
+}
+comptime {
+ f();
+}
+#expect_error=other.zig:2:5: error: found compile log statement
+#expect_compile_log=@as([:0]const u8, "foo"[0..3])
+
+#update=change module of other.zig
+#file=main.zig
+pub fn main() void {
+ _ = @import("foo");
+ _ = @import("other.zig");
+}
+#file=foo.zig
+comptime {
+ //_ = @import("other.zig");
+}
+#expect_error=other.zig:2:5: error: found compile log statement
+#expect_compile_log=@as([:0]const u8, "root"[0..4])
+
+#update=put other.zig in both modules
+#file=main.zig
+pub fn main() void {
+ _ = @import("foo");
+ _ = @import("other.zig");
+}
+#file=foo.zig
+comptime {
+ _ = @import("other.zig");
+}
+#expect_error=other.zig:1:1: error: file exists in modules 'root' and 'foo'
+#expect_error=other.zig:1:1: note: files must belong to only one module
+#expect_error=main.zig:3:17: note: file is imported here by the root of module 'root'
+#expect_error=foo.zig:2:17: note: file is imported here by the root of module 'foo'
+
+#update=put other.zig in no modules
+#file=main.zig
+pub fn main() void {
+ _ = @import("foo");
+ //_ = @import("other.zig");
+}
+#file=foo.zig
+comptime {
+ //_ = @import("other.zig");
+}
+#expect_stdout=""
test/incremental/change_zon_file
@@ -20,7 +20,8 @@ pub fn main() !void {
#update=delete file
#rm_file=message.zon
-#expect_error=message.zon:1:1: error: unable to load './message.zon': FileNotFound
+#expect_error=message.zon:1:1: error: unable to load 'message.zon': FileNotFound
+#expect_error=main.zig:2:37: note: file imported here
#update=remove reference to ZON file
#file=main.zig
@@ -29,7 +30,8 @@ const message: []const u8 = @import("message.zon");
pub fn main() !void {
try std.io.getStdOut().writeAll("a hardcoded string\n");
}
-#expect_error=message.zon:1:1: error: unable to load './message.zon': FileNotFound
+#expect_error=message.zon:1:1: error: unable to load 'message.zon': FileNotFound
+#expect_error=main.zig:2:37: note: file imported here
#update=recreate ZON file
#file=message.zon
test/compile_errors.zig
@@ -126,9 +126,10 @@ pub fn addCases(ctx: *Cases, b: *std.Build) !void {
\\ _ = @import("foo.zig");
\\}
, &[_][]const u8{
- ":1:1: error: file exists in multiple modules",
- ":1:1: note: root of module foo",
- ":3:17: note: imported from module root",
+ ":1:1: error: file exists in modules 'foo' and 'root'",
+ ":1:1: note: files must belong to only one module",
+ ":1:1: note: file is the root of module 'foo'",
+ ":3:17: note: file is imported here by the root of module 'root'",
});
case.addSourceFile("foo.zig",
\\const dummy = 0;