Commit 2a8fc1a18e
Changed files (25)
lib/std/zig/cross_target.zig
@@ -375,7 +375,7 @@ pub const CrossTarget = struct {
// `Target.current.os` works when doing `zig build` because Zig generates a build executable using
// native OS version range. However this will not be accurate otherwise, and
// will need to be integrated with `std.zig.system.NativeTargetInfo.detect`.
- var adjusted_os = if (self.os_tag) |os_tag| Target.Os.defaultVersionRange(os_tag) else Target.current.os;
+ var adjusted_os = if (self.os_tag) |os_tag| os_tag.defaultVersionRange() else Target.current.os;
if (self.os_version_min) |min| switch (min) {
.none => {},
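
Both this hunk and the lib/std/zig/system.zig hunk below are call-site updates: `defaultVersionRange` moves from the `Target.Os` namespace onto the `Target.Os.Tag` enum (see the lib/std/target.zig hunk), which permits method-call syntax on the tag. A minimal before/after sketch, assuming the std.Target API of this commit:

    const Target = @import("std").Target;

    // Before: a free function in the Os namespace, taking the tag as an argument:
    //     var os = Target.Os.defaultVersionRange(.freebsd);
    // After: a method on the Tag enum itself:
    var os = Target.Os.Tag.freebsd.defaultVersionRange();
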
lib/std/zig/system.zig
@@ -203,7 +203,7 @@ pub const NativeTargetInfo = struct {
/// deinitialization method.
/// TODO Remove the Allocator requirement from this function.
pub fn detect(allocator: *Allocator, cross_target: CrossTarget) DetectError!NativeTargetInfo {
- var os = Target.Os.defaultVersionRange(cross_target.getOsTag());
+ var os = cross_target.getOsTag().defaultVersionRange();
if (cross_target.os_tag == null) {
switch (Target.current.os.tag) {
.linux => {
lib/std/cache_hash.zig
@@ -82,7 +82,7 @@ pub const HashHelper = struct {
}
pub fn addListOfBytes(hh: *HashHelper, list_of_bytes: []const []const u8) void {
- hh.add(list_of_bytes.items.len);
+ hh.add(list_of_bytes.len);
for (list_of_bytes) |bytes| hh.addBytes(bytes);
}
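
The fix here is a type confusion: `list_of_bytes` is already a slice (`[]const []const u8`), not a `std.ArrayList`, so it has no `.items` field; its length is `.len` directly. A hedged call-site sketch (the argument values are invented):

    // hh: *HashHelper, as in the hunk above
    hh.addListOfBytes(&[_][]const u8{ "-lc", "--gc-sections" });
    // which is equivalent to:
    //     hh.add(@as(usize, 2));
    //     hh.addBytes("-lc");
    //     hh.addBytes("--gc-sections");
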
lib/std/target.zig
@@ -75,6 +75,13 @@ pub const Target = struct {
else => return ".so",
}
}
+
+ pub fn defaultVersionRange(tag: Tag) Os {
+ return .{
+ .tag = tag,
+ .version_range = VersionRange.default(tag),
+ };
+ }
};
/// Based on NTDDI version constants from
@@ -290,11 +297,32 @@ pub const Target = struct {
}
};
- pub fn defaultVersionRange(tag: Tag) Os {
- return .{
- .tag = tag,
- .version_range = VersionRange.default(tag),
- };
+ pub const TaggedVersionRange = union(enum) {
+ none: void,
+ semver: Version.Range,
+ linux: LinuxVersionRange,
+ windows: WindowsVersion.Range,
+ };
+
+ /// Provides a tagged union. `Target` does not store the tag because it is
+ /// redundant with the OS tag; this function abstracts that part away.
+ pub fn getVersionRange(self: Os) TaggedVersionRange {
+ switch (self.tag) {
+ .linux => return TaggedVersionRange{ .linux = self.version_range.linux },
+ .windows => return TaggedVersionRange{ .windows = self.version_range.windows },
+
+ .freebsd,
+ .macosx,
+ .ios,
+ .tvos,
+ .watchos,
+ .netbsd,
+ .openbsd,
+ .dragonfly,
+ => return TaggedVersionRange{ .semver = self.version_range.semver },
+
+ else => return .none,
+ }
}
/// Checks if system is guaranteed to be at least `version` or older than `version`.
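
A small usage sketch for the new tagged-union accessor. Field names (`Version.Range.min`, `LinuxVersionRange.glibc`, `WindowsVersion.Range.min`) are assumed from the std.Target of this era:

    const std = @import("std");

    fn describe(os: std.Target.Os) void {
        switch (os.getVersionRange()) {
            .none => std.log.info("no version range", .{}),
            .semver => |range| std.log.info("min major version {}", .{range.min.major}),
            .linux => |range| std.log.info("glibc major version {}", .{range.glibc.major}),
            .windows => |range| std.log.info("min build {}", .{@enumToInt(range.min)}),
        }
    }
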
src-self-hosted/codegen/c.zig
@@ -1,7 +1,7 @@
const std = @import("std");
const link = @import("../link.zig");
-const Module = @import("../Module.zig");
+const Module = @import("../ZigModule.zig");
const Inst = @import("../ir.zig").Inst;
const Value = @import("../value.zig").Value;
src-self-hosted/codegen/wasm.zig
@@ -5,7 +5,8 @@ const assert = std.debug.assert;
const leb = std.debug.leb;
const mem = std.mem;
-const Decl = @import("../Module.zig").Decl;
+const Module = @import("../ZigModule.zig");
+const Decl = Module.Decl;
const Inst = @import("../ir.zig").Inst;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
src-self-hosted/link/C.zig
@@ -2,7 +2,8 @@ const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
-const Module = @import("../Module.zig");
+const Module = @import("../ZigModule.zig");
+const Compilation = @import("../Module.zig");
const fs = std.fs;
const codegen = @import("../codegen/c.zig");
const link = @import("../link.zig");
@@ -20,7 +21,7 @@ main: std.ArrayList(u8),
called: std.StringHashMap(void),
need_stddef: bool = false,
need_stdint: bool = false,
-error_msg: *Module.ErrorMsg = undefined,
+error_msg: *Compilation.ErrorMsg = undefined,
pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*File {
assert(options.object_format == .c);
@@ -51,7 +52,7 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio
}
pub fn fail(self: *C, src: usize, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
- self.error_msg = try Module.ErrorMsg.create(self.base.allocator, src, format, args);
+ self.error_msg = try Compilation.ErrorMsg.create(self.base.allocator, src, format, args);
return error.AnalysisFail;
}
@@ -71,7 +72,7 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void {
};
}
-pub fn flush(self: *C, module: *Module) !void {
+pub fn flush(self: *C, comp: *Compilation) !void {
const writer = self.base.file.?.writer();
try writer.writeAll(@embedFile("cbe.h"));
var includes = false;
src-self-hosted/link/Coff.zig
@@ -7,7 +7,8 @@ const assert = std.debug.assert;
const fs = std.fs;
const trace = @import("../tracy.zig").trace;
-const Module = @import("../Module.zig");
+const Module = @import("../ZigModule.zig");
+const Compilation = @import("../Module.zig");
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
@@ -732,7 +733,7 @@ pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl,
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
- try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
+ try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
@@ -743,14 +744,14 @@ pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl,
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
- try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
+ try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
);
continue;
}
}
}
-pub fn flush(self: *Coff, module: *Module) !void {
+pub fn flush(self: *Coff, comp: *Compilation) !void {
if (self.text_section_size_dirty) {
// Write the new raw size in the .text header
var buf: [4]u8 = undefined;
src-self-hosted/link/Elf.zig
@@ -3,7 +3,8 @@ const mem = std.mem;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const ir = @import("../ir.zig");
-const Module = @import("../Module.zig");
+const Module = @import("../ZigModule.zig");
+const Compilation = @import("../Module.zig");
const fs = std.fs;
const elf = std.elf;
const codegen = @import("../codegen.zig");
@@ -122,6 +123,9 @@ dbg_info_decl_free_list: std.AutoHashMapUnmanaged(*TextBlock, void) = .{},
dbg_info_decl_first: ?*TextBlock = null,
dbg_info_decl_last: ?*TextBlock = null,
+/// Prevents other processes from clobbering the output file that this linker is producing.
+lock: ?std.cache_hash.Lock = null,
+
/// `alloc_num / alloc_den` is the factor of padding when allocating.
const alloc_num = 4;
const alloc_den = 3;
@@ -285,7 +289,21 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf
return self;
}
+pub fn releaseLock(self: *Elf) void {
+ if (self.lock) |*lock| {
+ lock.release();
+ self.lock = null;
+ }
+}
+
+pub fn toOwnedLock(self: *Elf) std.cache_hash.Lock {
+ const lock = self.lock.?;
+ self.lock = null;
+ return lock;
+}
+
pub fn deinit(self: *Elf) void {
+ self.releaseLock();
self.sections.deinit(self.base.allocator);
self.program_headers.deinit(self.base.allocator);
self.shstrtab.deinit(self.base.allocator);
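
These two helpers give the new `lock` field move semantics: `toOwnedLock` transfers ownership to the caller and nulls the field, so the `releaseLock` call added to `deinit` becomes a no-op for a lock that was moved out. A minimal sketch of the intended use, with the caller side assumed:

    // After a successful link (see linkWithLLD below): self.lock = ch.toOwnedLock();
    // Later, a caller that outlives this Elf state takes the lock for itself:
    var lock = elf.toOwnedLock(); // elf.lock is now null
    defer lock.release();         // elf.deinit()'s releaseLock() is now a no-op
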
@@ -709,20 +727,24 @@ pub const abbrev_base_type = 4;
pub const abbrev_pad1 = 5;
pub const abbrev_parameter = 6;
-pub fn flush(self: *Elf, module: *Module) !void {
+pub fn flush(self: *Elf, comp: *Compilation) !void {
if (build_options.have_llvm and self.base.options.use_lld) {
- return self.linkWithLLD(module);
+ return self.linkWithLLD(comp);
} else {
switch (self.base.options.effectiveOutputMode()) {
.Exe, .Obj => {},
.Lib => return error.TODOImplementWritingLibFiles,
}
- return self.flushInner(module);
+ return self.flushInner(comp);
}
}
/// Commit pending changes and write headers.
-fn flushInner(self: *Elf, module: *Module) !void {
+fn flushInner(self: *Elf, comp: *Compilation) !void {
+ // TODO This linker code currently assumes there is only 1 compilation unit and it corresponds to the
+ // Zig source code.
+ const zig_module = self.base.options.zig_module orelse return error.LinkingWithoutZigSourceUnimplemented;
+
const target_endian = self.base.options.target.cpu.arch.endian();
const foreign_endian = target_endian != std.Target.current.cpu.arch.endian();
const ptr_width_bytes: u8 = self.ptrWidthBytes();
@@ -844,8 +866,8 @@ fn flushInner(self: *Elf, module: *Module) !void {
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.makeDebugString(self.base.options.root_pkg.?.root_src_path);
- const comp_dir_strp = try self.makeDebugString(self.base.options.root_pkg.?.root_src_directory.path.?);
+ const name_strp = try self.makeDebugString(zig_module.root_pkg.root_src_path);
+ const comp_dir_strp = try self.makeDebugString(zig_module.root_pkg.root_src_directory.path.?);
const producer_strp = try self.makeDebugString(link.producer_string);
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
@@ -1014,7 +1036,7 @@ fn flushInner(self: *Elf, module: *Module) !void {
0, // include_directories (none except the compilation unit cwd)
});
// file_names[0]
- di_buf.appendSliceAssumeCapacity(self.base.options.root_pkg.?.root_src_path); // relative path name
+ di_buf.appendSliceAssumeCapacity(zig_module.root_pkg.root_src_path); // relative path name
di_buf.appendSliceAssumeCapacity(&[_]u8{
0, // null byte for the relative path name
0, // directory_index
@@ -1199,11 +1221,105 @@ fn flushInner(self: *Elf, module: *Module) !void {
assert(!self.debug_strtab_dirty);
}
-fn linkWithLLD(self: *Elf, module: *Module) !void {
+fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
+ const directory = self.base.options.directory; // Just an alias to make it shorter to type.
+
+ // If there is no Zig code to compile, then we should skip flushing the output file because it
+ // will not be part of the linker line anyway.
+ const zig_module_obj_path: ?[]const u8 = if (self.base.options.zig_module) |module| blk: {
+ try self.flushInner(comp);
+
+ const obj_basename = self.base.intermediary_basename.?;
+ const full_obj_path = if (directory.path) |dir_path|
+ try std.fs.path.join(arena, &[_][]const u8{dir_path, obj_basename})
+ else
+ obj_basename;
+ break :blk full_obj_path;
+ } else null;
+
+ // Here we want to determine whether we can save time by not invoking LLD when the
+ // output is unchanged. None of the linker options or the object files that are being
+ // linked are in the hash that namespaces the directory we are outputting to. Therefore,
+ // we must hash those now, and the resulting digest will form the "id" of the linking
+ // job we are about to perform.
+ // After a successful link, we store the id in the metadata of a symlink named "id.txt" in
+ // the artifact directory. So, now, we check if this symlink exists, and if it matches
+ // our digest. If so, we can skip linking. Otherwise, we proceed with invoking LLD.
+ const id_symlink_basename = "id.txt";
+
+ // We are about to obtain this lock, so here we give other processes a chance first.
+ self.releaseLock();
+
+ var ch = comp.cache_parent.obtain();
+ defer ch.deinit();
+
+ const is_lib = self.base.options.output_mode == .Lib;
+ const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
+ const have_dynamic_linker = self.base.options.link_libc and
+ self.base.options.link_mode == .Dynamic and (is_dyn_lib or self.base.options.output_mode == .Exe);
+
+ try ch.addOptionalFile(self.base.options.linker_script);
+ try ch.addOptionalFile(self.base.options.version_script);
+ try ch.addListOfFiles(self.base.options.objects);
+ for (comp.c_object_table.items()) |entry| switch (entry.key.status) {
+ .new => unreachable,
+ .failure => return error.NotAllCSourceFilesAvailableToLink,
+ .success => |success| _ = try ch.addFile(success.object_path, null),
+ };
+ try ch.addOptionalFile(zig_module_obj_path);
+ // We can skip hashing libc and libc++ components that we are in charge of building from Zig
+ // installation sources because they are always a product of the compiler version + target information.
+ ch.hash.addOptional(self.base.options.stack_size_override);
+ ch.hash.addOptional(self.base.options.gc_sections);
+ ch.hash.add(self.base.options.eh_frame_hdr);
+ ch.hash.add(self.base.options.rdynamic);
+ ch.hash.addListOfBytes(self.base.options.extra_lld_args);
+ ch.hash.addListOfBytes(self.base.options.lib_dirs);
+ ch.hash.add(self.base.options.z_nodelete);
+ ch.hash.add(self.base.options.z_defs);
+ if (self.base.options.link_libc) {
+ ch.hash.add(self.base.options.libc_installation != null);
+ if (self.base.options.libc_installation) |libc_installation| {
+ ch.hash.addBytes(libc_installation.crt_dir.?);
+ }
+ if (have_dynamic_linker) {
+ ch.hash.addOptionalBytes(self.base.options.dynamic_linker);
+ }
+ }
+ if (is_dyn_lib) {
+ ch.hash.addOptionalBytes(self.base.options.override_soname);
+ ch.hash.addOptional(self.base.options.version);
+ }
+ ch.hash.addListOfBytes(self.base.options.system_libs);
+ ch.hash.addOptional(self.base.options.allow_shlib_undefined);
+ ch.hash.add(self.base.options.bind_global_refs_locally);
+
+ // We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
+ _ = try ch.hit();
+ const digest = ch.final();
+
+ var prev_digest_buf: [digest.len]u8 = undefined;
+ const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch blk: {
+ // Handle this as a cache miss.
+ mem.set(u8, &prev_digest_buf, 0);
+ break :blk &prev_digest_buf;
+ };
+ if (mem.eql(u8, prev_digest, &digest)) {
+ // Hot diggity dog! The output binary is already there.
+ self.lock = ch.toOwnedLock();
+ return;
+ }
+
+ // We are about to change the output file to be different, so we invalidate the build hash now.
+ directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
+ error.FileNotFound => {},
+ else => |e| return e,
+ };
+
const target = self.base.options.target;
const is_obj = self.base.options.output_mode == .Obj;
@@ -1272,8 +1388,6 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
try argv.append(arg);
}
- const is_lib = self.base.options.output_mode == .Lib;
- const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
if (self.base.options.link_mode == .Static) {
if (target.cpu.arch.isARM() or target.cpu.arch.isThumb()) {
try argv.append("-Bstatic");
@@ -1288,7 +1402,7 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
try argv.append("-pie");
}
- const full_out_path = if (self.base.options.directory.path) |dir_path|
+ const full_out_path = if (directory.path) |dir_path|
try std.fs.path.join(arena, &[_][]const u8{dir_path, self.base.options.sub_path})
else
self.base.options.sub_path;
@@ -1311,13 +1425,14 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
break :o "Scrt1.o";
}
};
- try argv.append(try module.get_libc_crt_file(arena, crt1o));
+ try argv.append(try comp.get_libc_crt_file(arena, crt1o));
if (target_util.libc_needs_crti_crtn(target)) {
- try argv.append(try module.get_libc_crt_file(arena, "crti.o"));
+ try argv.append(try comp.get_libc_crt_file(arena, "crti.o"));
}
}
// TODO rpaths
+ // TODO add to cache hash above too
//for (size_t i = 0; i < g->rpath_list.length; i += 1) {
// Buf *rpath = g->rpath_list.at(i);
// add_rpath(lj, rpath);
@@ -1354,7 +1469,7 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
try argv.append(libc_installation.crt_dir.?);
}
- if (self.base.options.link_mode == .Dynamic and (is_dyn_lib or self.base.options.output_mode == .Exe)) {
+ if (have_dynamic_linker) {
if (self.base.options.dynamic_linker) |dynamic_linker| {
try argv.append("-dynamic-linker");
try argv.append(dynamic_linker);
@@ -1363,9 +1478,10 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
}
if (is_dyn_lib) {
- const soname = self.base.options.override_soname orelse
- try std.fmt.allocPrint(arena, "lib{}.so.{}", .{self.base.options.root_name,
- self.base.options.version.major,});
+ const soname = self.base.options.override_soname orelse if (self.base.options.version) |ver|
+ try std.fmt.allocPrint(arena, "lib{}.so.{}", .{self.base.options.root_name, ver.major})
+ else
+ try std.fmt.allocPrint(arena, "lib{}.so", .{self.base.options.root_name});
try argv.append("-soname");
try argv.append(soname);
@@ -1378,28 +1494,14 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
// Positional arguments to the linker such as object files.
try argv.appendSlice(self.base.options.objects);
- for (module.c_object_table.items()) |entry| {
- const c_object = entry.key;
- switch (c_object.status) {
- .new => unreachable,
- .failure => return error.NotAllCSourceFilesAvailableToLink,
- .success => |full_obj_path| {
- try argv.append(full_obj_path);
- },
- }
- }
-
- // If there is no Zig code to compile, then we should skip flushing the output file because it
- // will not be part of the linker line anyway.
- if (module.root_pkg != null) {
- try self.flushInner(module);
+ for (comp.c_object_table.items()) |entry| switch (entry.key.status) {
+ .new => unreachable,
+ .failure => unreachable, // Checked during cache hashing.
+ .success => |success| try argv.append(success.object_path),
+ };
- const obj_basename = self.base.intermediary_basename.?;
- const full_obj_path = if (self.base.options.directory.path) |dir_path|
- try std.fs.path.join(arena, &[_][]const u8{dir_path, obj_basename})
- else
- obj_basename;
- try argv.append(full_obj_path);
+ if (zig_module_obj_path) |p| {
+ try argv.append(p);
}
// TODO compiler-rt and libc
@@ -1419,7 +1521,7 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
// By this time, we depend on these libs being dynamically linked libraries and not static libraries
// (the check for that needs to be earlier), but they could be full paths to .so files, in which
// case we want to avoid prepending "-l".
- const ext = Module.classifyFileExt(link_lib);
+ const ext = Compilation.classifyFileExt(link_lib);
const arg = if (ext == .so) link_lib else try std.fmt.allocPrint(arena, "-l{}", .{link_lib});
argv.appendAssumeCapacity(arg);
}
@@ -1427,8 +1529,8 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
if (!is_obj) {
// libc++ dep
if (self.base.options.link_libcpp) {
- try argv.append(module.libcxxabi_static_lib.?);
- try argv.append(module.libcxx_static_lib.?);
+ try argv.append(comp.libcxxabi_static_lib.?);
+ try argv.append(comp.libcxx_static_lib.?);
}
// libc dep
@@ -1448,15 +1550,15 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
try argv.append("-lpthread");
}
} else if (target.isGnuLibC()) {
- try argv.append(module.libunwind_static_lib.?);
+ try argv.append(comp.libunwind_static_lib.?);
// TODO here we need to iterate over the glibc libs and add the .so files to the linker line.
std.log.warn("TODO port add_glibc_libs to stage2", .{});
- try argv.append(try module.get_libc_crt_file(arena, "libc_nonshared.a"));
+ try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
} else if (target.isMusl()) {
- try argv.append(module.libunwind_static_lib.?);
- try argv.append(module.libc_static_lib.?);
+ try argv.append(comp.libunwind_static_lib.?);
+ try argv.append(comp.libc_static_lib.?);
} else if (self.base.options.link_libcpp) {
- try argv.append(module.libunwind_static_lib.?);
+ try argv.append(comp.libunwind_static_lib.?);
} else {
unreachable; // Compiler was supposed to emit an error for not being able to provide libc.
}
@@ -1466,9 +1568,9 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
// crt end
if (link_in_crt) {
if (target.isAndroid()) {
- try argv.append(try module.get_libc_crt_file(arena, "crtend_android.o"));
+ try argv.append(try comp.get_libc_crt_file(arena, "crtend_android.o"));
} else if (target_util.libc_needs_crti_crtn(target)) {
- try argv.append(try module.get_libc_crt_file(arena, "crtn.o"));
+ try argv.append(try comp.get_libc_crt_file(arena, "crtn.o"));
}
}
@@ -1500,6 +1602,19 @@ fn linkWithLLD(self: *Elf, module: *Module) !void {
const ZigLLDLink = @import("../llvm.zig").ZigLLDLink;
const ok = ZigLLDLink(.ELF, new_argv.ptr, new_argv.len, append_diagnostic, 0, 0);
if (!ok) return error.LLDReportedFailure;
+
+ // Update the dangling symlink "id.txt" with the digest. If it fails we can continue; it only
+ // means that the next invocation will have an unnecessary cache miss.
+ directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
+ std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
+ };
+ // Again failure here only means an unnecessary cache miss.
+ ch.writeManifest() catch |err| {
+ std.log.warn("failed to write cache manifest when linking: {}", .{ @errorName(err) });
+ };
+ // We hang on to this lock so that the output file path can be used without
+ // other processes clobbering it.
+ self.lock = ch.toOwnedLock();
}
fn append_diagnostic(context: usize, ptr: [*]const u8, len: usize) callconv(.C) void {
@@ -2396,7 +2511,7 @@ pub fn updateDeclExports(
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
- try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
+ try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
@@ -2414,7 +2529,7 @@ pub fn updateDeclExports(
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
- try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
+ try Compilation.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
);
continue;
},
@@ -2722,8 +2837,8 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
directory_count * 8 + file_name_count * 8 +
// These are encoded as DW.FORM_string rather than DW.FORM_strp as we would like
// because of a workaround for readelf and gdb failing to understand DWARFv5 correctly.
- self.base.options.root_pkg.?.root_src_directory.path.?.len +
- self.base.options.root_pkg.?.root_src_path.len);
+ self.base.options.zig_module.?.root_pkg.root_src_directory.path.?.len +
+ self.base.options.zig_module.?.root_pkg.root_src_path.len);
}
fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
src-self-hosted/link/MachO.zig
@@ -12,7 +12,8 @@ const mem = std.mem;
const trace = @import("../tracy.zig").trace;
const Type = @import("../type.zig").Type;
-const Module = @import("../Module.zig");
+const Module = @import("../ZigModule.zig");
+const Compilation = @import("../Module.zig");
const link = @import("../link.zig");
const File = link.File;
@@ -205,7 +206,7 @@ fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Mach
return self;
}
-pub fn flush(self: *MachO, module: *Module) !void {
+pub fn flush(self: *MachO, comp: *Compilation) !void {
switch (self.base.options.output_mode) {
.Exe => {
var last_cmd_offset: usize = @sizeOf(macho.mach_header_64);
src-self-hosted/link/Wasm.zig
@@ -6,7 +6,8 @@ const assert = std.debug.assert;
const fs = std.fs;
const leb = std.debug.leb;
-const Module = @import("../Module.zig");
+const Module = @import("../ZigModule.zig");
+const Compilation = @import("../Module.zig");
const codegen = @import("../codegen/wasm.zig");
const link = @import("../link.zig");
@@ -126,7 +127,7 @@ pub fn freeDecl(self: *Wasm, decl: *Module.Decl) void {
decl.fn_link.wasm = null;
}
-pub fn flush(self: *Wasm, module: *Module) !void {
+pub fn flush(self: *Wasm, comp: *Compilation) !void {
const file = self.base.file.?;
const header_size = 5 + 1;
@@ -164,7 +165,7 @@ pub fn flush(self: *Wasm, module: *Module) !void {
}
// Export section
- {
+ if (self.base.options.zig_module) |module| {
const header_offset = try reserveVecSectionHeader(file);
const writer = file.writer();
var count: u32 = 0;
src-self-hosted/astgen.zig
@@ -6,7 +6,7 @@ const Type = @import("type.zig").Type;
const TypedValue = @import("TypedValue.zig");
const assert = std.debug.assert;
const zir = @import("zir.zig");
-const Module = @import("Module.zig");
+const Module = @import("ZigModule.zig");
const ast = std.zig.ast;
const trace = @import("tracy.zig").trace;
const Scope = Module.Scope;
src-self-hosted/codegen.zig
@@ -7,8 +7,9 @@ const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const link = @import("link.zig");
-const Module = @import("Module.zig");
-const ErrorMsg = Module.ErrorMsg;
+const Module = @import("ZigModule.zig");
+const Compilation = @import("Module.zig");
+const ErrorMsg = Compilation.ErrorMsg;
const Target = std.Target;
const Allocator = mem.Allocator;
const trace = @import("tracy.zig").trace;
@@ -50,7 +51,7 @@ pub const Result = union(enum) {
appended: void,
/// The value is available externally, `code` is unused.
externally_managed: []const u8,
- fail: *Module.ErrorMsg,
+ fail: *ErrorMsg,
};
pub const GenerateSymbolError = error{
src-self-hosted/glibc.zig
@@ -5,6 +5,7 @@ const mem = std.mem;
const Module = @import("Module.zig");
const path = std.fs.path;
const build_options = @import("build_options");
+const trace = @import("tracy.zig").trace;
pub const Lib = struct {
name: []const u8,
@@ -54,6 +55,9 @@ pub const LoadMetaDataError = error{
/// This function will emit a log error when there is a problem with the zig installation and then return
/// `error.ZigInstallationCorrupt`.
pub fn loadMetaData(gpa: *Allocator, zig_lib_dir: std.fs.Dir) LoadMetaDataError!*ABI {
+ const tracy = trace(@src());
+ defer tracy.end();
+
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
@@ -584,6 +588,9 @@ fn lib_path(mod: *Module, arena: *Allocator, sub_path: []const u8) ![]const u8 {
}
fn build_libc_object(mod: *Module, basename: []const u8, c_source_file: Module.CSourceFile) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
// TODO: This is extracted into a local variable to work around a stage1 miscompilation.
const emit_bin = Module.EmitLoc{
.directory = null, // Put it in the cache directory.
@@ -618,8 +625,11 @@ fn build_libc_object(mod: *Module, basename: []const u8, c_source_file: Module.C
try sub_module.update();
try mod.crt_files.ensureCapacity(mod.gpa, mod.crt_files.count() + 1);
- const artifact_path = try std.fs.path.join(mod.gpa, &[_][]const u8{
- sub_module.zig_cache_artifact_directory.path.?, basename,
- });
+ const artifact_path = if (sub_module.bin_file.options.directory.path) |p|
+ try std.fs.path.join(mod.gpa, &[_][]const u8{ p, basename })
+ else
+ try mod.gpa.dupe(u8, basename);
+
+ // TODO obtain a lock on the artifact and put that in crt_files as well.
mod.crt_files.putAssumeCapacityNoClobber(basename, artifact_path);
}
src-self-hosted/ir.zig
@@ -1,7 +1,7 @@
const std = @import("std");
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
-const Module = @import("Module.zig");
+const Module = @import("ZigModule.zig");
const assert = std.debug.assert;
const codegen = @import("codegen.zig");
const ast = std.zig.ast;
src-self-hosted/link.zig
@@ -1,6 +1,7 @@
const std = @import("std");
const Allocator = std.mem.Allocator;
-const Module = @import("Module.zig");
+const Compilation = @import("Module.zig");
+const ZigModule = @import("ZigModule.zig");
const fs = std.fs;
const trace = @import("tracy.zig").trace;
const Package = @import("Package.zig");
@@ -12,7 +13,7 @@ pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ b
pub const Options = struct {
/// Where the output will go.
- directory: Module.Directory,
+ directory: Compilation.Directory,
/// Path to the output file, relative to `directory`.
sub_path: []const u8,
target: std.Target,
@@ -21,7 +22,9 @@ pub const Options = struct {
object_format: std.builtin.ObjectFormat,
optimize_mode: std.builtin.Mode,
root_name: []const u8,
- root_pkg: ?*const Package,
+ /// Not every Compilation compiles .zig code! For example, you could do `zig build-exe foo.o`.
+ /// TODO rename Module to Compilation and then (as a separate commit) ZigModule to Module.
+ zig_module: ?*ZigModule,
dynamic_linker: ?[]const u8 = null,
/// Used for calculating how much space to reserve for symbols in case the binary file
/// does not already have a symbol table.
@@ -71,7 +74,7 @@ pub const Options = struct {
lib_dirs: []const []const u8 = &[0][]const u8{},
rpath_list: []const []const u8 = &[0][]const u8{},
- version: std.builtin.Version,
+ version: ?std.builtin.Version,
libc_installation: ?*const LibCInstallation,
pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
@@ -184,7 +187,7 @@ pub const File = struct {
/// May be called before or after updateDeclExports but must be called
/// after allocateDeclIndexes for any given Decl.
- pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
+ pub fn updateDecl(base: *File, module: *ZigModule, decl: *ZigModule.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
@@ -194,7 +197,7 @@ pub const File = struct {
}
}
- pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
+ pub fn updateDeclLineNumber(base: *File, module: *ZigModule, decl: *ZigModule.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
@@ -205,7 +208,7 @@ pub const File = struct {
/// Must be called before any call to updateDecl or updateDeclExports for
/// any given Decl.
- pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
+ pub fn allocateDeclIndexes(base: *File, decl: *ZigModule.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
@@ -256,20 +259,20 @@ pub const File = struct {
}
}
- pub fn flush(base: *File, module: *Module) !void {
+ pub fn flush(base: *File, comp: *Compilation) !void {
const tracy = trace(@src());
defer tracy.end();
try switch (base.tag) {
- .coff => @fieldParentPtr(Coff, "base", base).flush(module),
- .elf => @fieldParentPtr(Elf, "base", base).flush(module),
- .macho => @fieldParentPtr(MachO, "base", base).flush(module),
- .c => @fieldParentPtr(C, "base", base).flush(module),
- .wasm => @fieldParentPtr(Wasm, "base", base).flush(module),
+ .coff => @fieldParentPtr(Coff, "base", base).flush(comp),
+ .elf => @fieldParentPtr(Elf, "base", base).flush(comp),
+ .macho => @fieldParentPtr(MachO, "base", base).flush(comp),
+ .c => @fieldParentPtr(C, "base", base).flush(comp),
+ .wasm => @fieldParentPtr(Wasm, "base", base).flush(comp),
};
}
- pub fn freeDecl(base: *File, decl: *Module.Decl) void {
+ pub fn freeDecl(base: *File, decl: *ZigModule.Decl) void {
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
@@ -293,9 +296,9 @@ pub const File = struct {
/// allocateDeclIndexes for any given Decl.
pub fn updateDeclExports(
base: *File,
- module: *Module,
- decl: *const Module.Decl,
- exports: []const *Module.Export,
+ module: *ZigModule,
+ decl: *const ZigModule.Decl,
+ exports: []const *ZigModule.Export,
) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
@@ -306,7 +309,7 @@ pub const File = struct {
}
}
- pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
+ pub fn getDeclVAddr(base: *File, decl: *const ZigModule.Decl) u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
src-self-hosted/main.zig
@@ -268,6 +268,7 @@ pub fn buildOutputType(
var link_mode: ?std.builtin.LinkMode = null;
var root_src_file: ?[]const u8 = null;
var version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 };
+ var have_version = false;
var strip = false;
var single_threaded = false;
var watch = false;
@@ -445,6 +446,7 @@ pub fn buildOutputType(
version = std.builtin.Version.parse(args[i]) catch |err| {
fatal("unable to parse --version '{}': {}", .{ args[i], @errorName(err) });
};
+ have_version = true;
} else if (mem.eql(u8, arg, "-target")) {
if (i + 1 >= args.len) fatal("expected parameter after {}", .{arg});
i += 1;
@@ -799,6 +801,7 @@ pub fn buildOutputType(
version.major = std.fmt.parseInt(u32, linker_args.items[i], 10) catch |err| {
fatal("unable to parse '{}': {}", .{ arg, @errorName(err) });
};
+ have_version = true;
} else if (mem.eql(u8, arg, "--minor-image-version")) {
i += 1;
if (i >= linker_args.items.len) {
@@ -807,6 +810,7 @@ pub fn buildOutputType(
version.minor = std.fmt.parseInt(u32, linker_args.items[i], 10) catch |err| {
fatal("unable to parse '{}': {}", .{ arg, @errorName(err) });
};
+ have_version = true;
} else if (mem.eql(u8, arg, "--stack")) {
i += 1;
if (i >= linker_args.items.len) {
@@ -1161,7 +1165,7 @@ pub fn buildOutputType(
.self_exe_path = self_exe_path,
.rand = &default_prng.random,
.clang_passthrough_mode = arg_mode != .build,
- .version = version,
+ .version = if (have_version) version else null,
.libc_installation = if (libc_installation) |*lci| lci else null,
.debug_cc = debug_cc,
.debug_link = debug_link,
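
The new `have_version` flag distinguishes an explicitly passed version from the zeroed default, so the linker can later pick between `lib{}.so.{}` and plain `lib{}.so` (see the soname change in link/Elf.zig above). The pattern in isolation:

    var version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 };
    var have_version = false;
    // ...set to true wherever --version, --major-image-version, or
    // --minor-image-version is successfully parsed...
    const effective: ?std.builtin.Version = if (have_version) version else null;
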
@@ -1228,7 +1232,9 @@ fn updateModule(gpa: *Allocator, module: *Module, zir_out_path: ?[]const u8) !vo
}
if (zir_out_path) |zop| {
- var new_zir_module = try zir.emit(gpa, module);
+ const zig_module = module.bin_file.options.zig_module orelse
+ fatal("-femit-zir with no zig source code", .{});
+ var new_zir_module = try zir.emit(gpa, zig_module);
defer new_zir_module.deinit(gpa);
const baf = try io.BufferedAtomicFile.create(gpa, fs.cwd(), zop, .{});
src-self-hosted/Module.zig
@@ -1,90 +1,39 @@
+//! TODO This is going to get renamed from Module to Compilation.
+const Module = @This();
+const Compilation = @This();
+
const std = @import("std");
const mem = std.mem;
const Allocator = std.mem.Allocator;
-const ArrayListUnmanaged = std.ArrayListUnmanaged;
const Value = @import("value.zig").Value;
-const Type = @import("type.zig").Type;
-const TypedValue = @import("TypedValue.zig");
const assert = std.debug.assert;
-const log = std.log.scoped(.module);
-const BigIntConst = std.math.big.int.Const;
-const BigIntMutable = std.math.big.int.Mutable;
+const log = std.log.scoped(.compilation);
const Target = std.Target;
const target_util = @import("target.zig");
const Package = @import("Package.zig");
const link = @import("link.zig");
-const ir = @import("ir.zig");
-const zir = @import("zir.zig");
-const Module = @This();
-const Inst = ir.Inst;
-const Body = ir.Body;
-const ast = std.zig.ast;
const trace = @import("tracy.zig").trace;
const liveness = @import("liveness.zig");
-const astgen = @import("astgen.zig");
-const zir_sema = @import("zir_sema.zig");
const build_options = @import("build_options");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const glibc = @import("glibc.zig");
const fatal = @import("main.zig").fatal;
+const ZigModule = @import("ZigModule.zig");
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: *Allocator,
/// Arena-allocated memory used during initialization. Should be untouched until deinit.
arena_state: std.heap.ArenaAllocator.State,
-/// Pointer to externally managed resource. `null` if there is no zig file being compiled.
-root_pkg: ?*Package,
-/// Module owns this resource.
-/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
-root_scope: *Scope,
bin_file: *link.File,
-/// It's rare for a decl to be exported, so we save memory by having a sparse map of
-/// Decl pointers to details about them being exported.
-/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
-decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
-/// We track which export is associated with the given symbol name for quick
-/// detection of symbol collisions.
-symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
-/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
-/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
-/// is performing the export of another Decl.
-/// This table owns the Export memory.
-export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
-/// Maps fully qualified namespaced names to the Decl struct for them.
-decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
-
c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{},
link_error_flags: link.File.ErrorFlags = .{},
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
-/// We optimize memory usage for a compilation with no compile errors by storing the
-/// error messages and mapping outside of `Decl`.
-/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
-/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
-/// a Decl can have a failed_decls entry but have analysis status of success.
-failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
-/// Using a map here for consistency with the other fields here.
-/// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator.
-failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
-/// Using a map here for consistency with the other fields here.
-/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
-failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
/// The ErrorMsg memory is owned by the `CObject`, using Module's general purpose allocator.
failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *ErrorMsg) = .{},
-/// Incrementing integer used to compare against the corresponding Decl
-/// field to determine whether a Decl's status applies to an ongoing update, or a
-/// previous analysis.
-generation: u32 = 0,
-
-next_anon_name_index: usize = 0,
-
-/// Candidates for deletion. After a semantic analysis update completes, this list
-/// contains Decls that need to be deleted if they end up having no references to them.
-deletion_set: std.ArrayListUnmanaged(*Decl) = .{},
-
keep_source_files_loaded: bool,
use_clang: bool,
sanitize_c: bool,
@@ -95,18 +44,15 @@ sanitize_c: bool,
clang_passthrough_mode: bool,
/// Whether to print clang argvs to stdout.
debug_cc: bool,
-
-/// Error tags and their values, tag names are duped with mod.gpa.
-global_error_set: std.StringHashMapUnmanaged(u16) = .{},
+disable_c_depfile: bool,
c_source_files: []const CSourceFile,
clang_argv: []const []const u8,
-cache: std.cache_hash.CacheHash,
+cache_parent: *std.cache_hash.Cache,
/// Path to own executable for invoking `zig clang`.
self_exe_path: ?[]const u8,
zig_lib_directory: Directory,
zig_cache_directory: Directory,
-zig_cache_artifact_directory: Directory,
libc_include_dir_list: []const []const u8,
rand: *std.rand.Random,
@@ -128,7 +74,10 @@ libc_static_lib: ?[]const u8 = null,
/// The key is the basename, and the value is the absolute path to the completed build artifact.
crt_files: std.StringHashMapUnmanaged([]const u8) = .{},
-pub const InnerError = error{ OutOfMemory, AnalysisFail };
+/// Keeping track of this possibly open resource so we can close it later.
+owned_link_dir: ?std.fs.Dir,
+
+pub const InnerError = ZigModule.InnerError;
/// For passing to a C compiler.
pub const CSourceFile = struct {
@@ -138,14 +87,14 @@ pub const CSourceFile = struct {
const WorkItem = union(enum) {
/// Write the machine code for a Decl to the output file.
- codegen_decl: *Decl,
+ codegen_decl: *ZigModule.Decl,
/// The Decl needs to be analyzed and possibly export itself.
/// It may have already be analyzed, or it may have been determined
/// to be outdated; in this case perform semantic analysis again.
- analyze_decl: *Decl,
+ analyze_decl: *ZigModule.Decl,
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
- update_line_number: *Decl,
+ update_line_number: *ZigModule.Decl,
/// Invoke the Clang compiler to create an object file, which gets linked
/// with the Module.
c_object: *CObject,
@@ -156,192 +105,6 @@ const WorkItem = union(enum) {
glibc_so: *const glibc.Lib,
};
-pub const Export = struct {
- options: std.builtin.ExportOptions,
- /// Byte offset into the file that contains the export directive.
- src: usize,
- /// Represents the position of the export, if any, in the output file.
- link: link.File.Elf.Export,
- /// The Decl that performs the export. Note that this is *not* the Decl being exported.
- owner_decl: *Decl,
- /// The Decl being exported. Note this is *not* the Decl performing the export.
- exported_decl: *Decl,
- status: enum {
- in_progress,
- failed,
- /// Indicates that the failure was due to a temporary issue, such as an I/O error
- /// when writing to the output file. Retrying the export may succeed.
- failed_retryable,
- complete,
- },
-};
-
-pub const Decl = struct {
- /// This name is relative to the containing namespace of the decl. It uses a null-termination
- /// to save bytes, since there can be a lot of decls in a compilation. The null byte is not allowed
- /// in symbol names, because executable file formats use null-terminated strings for symbol names.
- /// All Decls have names, even values that are not bound to a zig namespace. This is necessary for
- /// mapping them to an address in the output file.
- /// Memory owned by this decl, using Module's allocator.
- name: [*:0]const u8,
- /// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`.
- /// Reference to externally owned memory.
- scope: *Scope,
- /// The AST Node decl index or ZIR Inst index that contains this declaration.
- /// Must be recomputed when the corresponding source file is modified.
- src_index: usize,
- /// The most recent value of the Decl after a successful semantic analysis.
- typed_value: union(enum) {
- never_succeeded: void,
- most_recent: TypedValue.Managed,
- },
- /// Represents the "shallow" analysis status. For example, for decls that are functions,
- /// the function type is analyzed with this set to `in_progress`, however, the semantic
- /// analysis of the function body is performed with this value set to `success`. Functions
- /// have their own analysis status field.
- analysis: enum {
- /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore
- /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced.
- unreferenced,
- /// Semantic analysis for this Decl is running right now. This state detects dependency loops.
- in_progress,
- /// This Decl might be OK but it depends on another one which did not successfully complete
- /// semantic analysis.
- dependency_failure,
- /// Semantic analysis failure.
- /// There will be a corresponding ErrorMsg in Module.failed_decls.
- sema_failure,
- /// There will be a corresponding ErrorMsg in Module.failed_decls.
- /// This indicates the failure was something like running out of disk space,
- /// and attempting semantic analysis again may succeed.
- sema_failure_retryable,
- /// There will be a corresponding ErrorMsg in Module.failed_decls.
- codegen_failure,
- /// There will be a corresponding ErrorMsg in Module.failed_decls.
- /// This indicates the failure was something like running out of disk space,
- /// and attempting codegen again may succeed.
- codegen_failure_retryable,
- /// Everything is done. During an update, this Decl may be out of date, depending
- /// on its dependencies. The `generation` field can be used to determine if this
- /// completion status occurred before or after a given update.
- complete,
- /// A Module update is in progress, and this Decl has been flagged as being known
- /// to require re-analysis.
- outdated,
- },
- /// This flag is set when this Decl is added to a check_for_deletion set, and cleared
- /// when removed.
- deletion_flag: bool,
- /// Whether the corresponding AST decl has a `pub` keyword.
- is_pub: bool,
-
- /// An integer that can be checked against the corresponding incrementing
- /// generation field of Module. This is used to determine whether `complete` status
- /// represents pre- or post- re-analysis.
- generation: u32,
-
- /// Represents the position of the code in the output file.
- /// This is populated regardless of semantic analysis and code generation.
- link: link.File.LinkBlock,
-
- /// Represents the function in the linked output file, if the `Decl` is a function.
- /// This is stored here and not in `Fn` because `Decl` survives across updates but
- /// `Fn` does not.
- /// TODO Look into making `Fn` a longer lived structure and moving this field there
- /// to save on memory usage.
- fn_link: link.File.LinkFn,
-
- contents_hash: std.zig.SrcHash,
-
- /// The shallow set of other decls whose typed_value could possibly change if this Decl's
- /// typed_value is modified.
- dependants: DepsTable = .{},
- /// The shallow set of other decls whose typed_value changing indicates that this Decl's
- /// typed_value may need to be regenerated.
- dependencies: DepsTable = .{},
-
- /// The reason this is not `std.AutoArrayHashMapUnmanaged` is a workaround for
- /// stage1 compiler giving me: `error: struct 'Module.Decl' depends on itself`
- pub const DepsTable = std.ArrayHashMapUnmanaged(*Decl, void, std.array_hash_map.getAutoHashFn(*Decl), std.array_hash_map.getAutoEqlFn(*Decl), false);
-
- pub fn destroy(self: *Decl, gpa: *Allocator) void {
- gpa.free(mem.spanZ(self.name));
- if (self.typedValueManaged()) |tvm| {
- tvm.deinit(gpa);
- }
- self.dependants.deinit(gpa);
- self.dependencies.deinit(gpa);
- gpa.destroy(self);
- }
-
- pub fn src(self: Decl) usize {
- switch (self.scope.tag) {
- .container => {
- const container = @fieldParentPtr(Scope.Container, "base", self.scope);
- const tree = container.file_scope.contents.tree;
- // TODO Container should have its own decls()
- const decl_node = tree.root_node.decls()[self.src_index];
- return tree.token_locs[decl_node.firstToken()].start;
- },
- .zir_module => {
- const zir_module = @fieldParentPtr(Scope.ZIRModule, "base", self.scope);
- const module = zir_module.contents.module;
- const src_decl = module.decls[self.src_index];
- return src_decl.inst.src;
- },
- .none => unreachable,
- .file, .block => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .decl => unreachable,
- }
- }
-
- pub fn fullyQualifiedNameHash(self: Decl) Scope.NameHash {
- return self.scope.fullyQualifiedNameHash(mem.spanZ(self.name));
- }
-
- pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue {
- const tvm = self.typedValueManaged() orelse return error.AnalysisFail;
- return tvm.typed_value;
- }
-
- pub fn value(self: *Decl) error{AnalysisFail}!Value {
- return (try self.typedValue()).val;
- }
-
- pub fn dump(self: *Decl) void {
- const loc = std.zig.findLineColumn(self.scope.source.bytes, self.src);
- std.debug.print("{}:{}:{} name={} status={}", .{
- self.scope.sub_file_path,
- loc.line + 1,
- loc.column + 1,
- mem.spanZ(self.name),
- @tagName(self.analysis),
- });
- if (self.typedValueManaged()) |tvm| {
- std.debug.print(" ty={} val={}", .{ tvm.typed_value.ty, tvm.typed_value.val });
- }
- std.debug.print("\n", .{});
- }
-
- pub fn typedValueManaged(self: *Decl) ?*TypedValue.Managed {
- switch (self.typed_value) {
- .most_recent => |*x| return x,
- .never_succeeded => return null,
- }
- }
-
- fn removeDependant(self: *Decl, other: *Decl) void {
- self.dependants.removeAssertDiscard(other);
- }
-
- fn removeDependency(self: *Decl, other: *Decl) void {
- self.dependencies.removeAssertDiscard(other);
- }
-};
-
pub const CObject = struct {
/// Relative to cwd. Owned by arena.
src_path: []const u8,
@@ -350,578 +113,39 @@ pub const CObject = struct {
arena: std.heap.ArenaAllocator.State,
status: union(enum) {
new,
- /// This is the output object path. Owned by gpa.
- success: []u8,
- /// There will be a corresponding ErrorMsg in Module.failed_c_objects.
- /// This is the C source file contents (used for printing error messages). Owned by gpa.
- failure: []u8,
+ success: struct {
+ /// The outputted result. Owned by gpa.
+ object_path: []u8,
+ /// This is a file system lock on the cache hash manifest representing this
+ /// object. It prevents other invocations of the Zig compiler from interfering
+ /// with this object until released.
+ lock: std.cache_hash.Lock,
+ },
+ /// There will be a corresponding ErrorMsg in Compilation.failed_c_objects.
+ failure,
},
- pub fn destroy(self: *CObject, gpa: *Allocator) void {
+ /// Returns true if the previous status was a failure.
+ pub fn clearStatus(self: *CObject, gpa: *Allocator) bool {
switch (self.status) {
- .new => {},
- .failure, .success => |data| gpa.free(data),
- }
- self.arena.promote(gpa).deinit();
- }
-};
-
-/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
-pub const Fn = struct {
- /// This memory owned by the Decl's TypedValue.Managed arena allocator.
- analysis: union(enum) {
- queued: *ZIR,
- in_progress,
- /// There will be a corresponding ErrorMsg in Module.failed_decls
- sema_failure,
- /// This Fn might be OK but it depends on another Decl which did not successfully complete
- /// semantic analysis.
- dependency_failure,
- success: Body,
- },
- owner_decl: *Decl,
-
- /// This memory is temporary and points to stack memory for the duration
- /// of Fn analysis.
- pub const Analysis = struct {
- inner_block: Scope.Block,
- };
-
- /// Contains un-analyzed ZIR instructions generated from Zig source AST.
- pub const ZIR = struct {
- body: zir.Module.Body,
- arena: std.heap.ArenaAllocator.State,
- };
-
- /// For debugging purposes.
- pub fn dump(self: *Fn, mod: Module) void {
- std.debug.print("Module.Function(name={}) ", .{self.owner_decl.name});
- switch (self.analysis) {
- .queued => {
- std.debug.print("queued\n", .{});
- },
- .in_progress => {
- std.debug.print("in_progress\n", .{});
- },
- else => {
- std.debug.print("\n", .{});
- zir.dumpFn(mod, self);
- },
- }
- }
-};
-
-pub const Var = struct {
- /// if is_extern == true this is undefined
- init: Value,
- owner_decl: *Decl,
-
- is_extern: bool,
- is_mutable: bool,
- is_threadlocal: bool,
-};
-
-pub const Scope = struct {
- tag: Tag,
-
- pub const NameHash = [16]u8;
-
- pub fn cast(base: *Scope, comptime T: type) ?*T {
- if (base.tag != T.base_tag)
- return null;
-
- return @fieldParentPtr(T, "base", base);
- }
-
- /// Asserts the scope has a parent which is a DeclAnalysis and
- /// returns the arena Allocator.
- pub fn arena(self: *Scope) *Allocator {
- switch (self.tag) {
- .block => return self.cast(Block).?.arena,
- .decl => return &self.cast(DeclAnalysis).?.arena.allocator,
- .gen_zir => return self.cast(GenZIR).?.arena,
- .local_val => return self.cast(LocalVal).?.gen_zir.arena,
- .local_ptr => return self.cast(LocalPtr).?.gen_zir.arena,
- .zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
- .file => unreachable,
- .container => unreachable,
- .none => unreachable,
- }
- }
-
- /// If the scope has a parent which is a `DeclAnalysis`,
- /// returns the `Decl`, otherwise returns `null`.
- pub fn decl(self: *Scope) ?*Decl {
- return switch (self.tag) {
- .block => self.cast(Block).?.decl,
- .gen_zir => self.cast(GenZIR).?.decl,
- .local_val => self.cast(LocalVal).?.gen_zir.decl,
- .local_ptr => self.cast(LocalPtr).?.gen_zir.decl,
- .decl => self.cast(DeclAnalysis).?.decl,
- .zir_module => null,
- .file => null,
- .container => null,
- .none => unreachable,
- };
- }
-
- /// Asserts the scope has a parent which is a ZIRModule or Container and
- /// returns it.
- pub fn namespace(self: *Scope) *Scope {
- switch (self.tag) {
- .block => return self.cast(Block).?.decl.scope,
- .gen_zir => return self.cast(GenZIR).?.decl.scope,
- .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope,
- .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope,
- .decl => return self.cast(DeclAnalysis).?.decl.scope,
- .file => return &self.cast(File).?.root_container.base,
- .zir_module, .container => return self,
- .none => unreachable,
- }
- }
-
- /// Must generate unique bytes with no collisions with other decls.
- /// The point of hashing here is only to limit the number of bytes of
- /// the unique identifier to a fixed size (16 bytes).
- pub fn fullyQualifiedNameHash(self: *Scope, name: []const u8) NameHash {
- switch (self.tag) {
- .block => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .decl => unreachable,
- .file => unreachable,
- .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
- .container => return self.cast(Container).?.fullyQualifiedNameHash(name),
- .none => unreachable,
- }
- }
-
- /// Asserts the scope is a child of a File and has an AST tree and returns the tree.
- pub fn tree(self: *Scope) *ast.Tree {
- switch (self.tag) {
- .file => return self.cast(File).?.contents.tree,
- .zir_module => unreachable,
- .none => unreachable,
- .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree,
- .block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree,
- .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree,
- .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
- .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
- .container => return self.cast(Container).?.file_scope.contents.tree,
- }
- }
-
- /// Asserts the scope is a child of a `GenZIR` and returns it.
- pub fn getGenZIR(self: *Scope) *GenZIR {
- return switch (self.tag) {
- .block => unreachable,
- .gen_zir => self.cast(GenZIR).?,
- .local_val => return self.cast(LocalVal).?.gen_zir,
- .local_ptr => return self.cast(LocalPtr).?.gen_zir,
- .decl => unreachable,
- .zir_module => unreachable,
- .file => unreachable,
- .container => unreachable,
- .none => unreachable,
- };
- }
-
- /// Asserts the scope has a parent which is a ZIRModule, Container or File and
- /// returns the sub_file_path field.
- pub fn subFilePath(base: *Scope) []const u8 {
- switch (base.tag) {
- .container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path,
- .file => return @fieldParentPtr(File, "base", base).sub_file_path,
- .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
- .none => unreachable,
- .block => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .decl => unreachable,
- }
- }
-
- pub fn unload(base: *Scope, gpa: *Allocator) void {
- switch (base.tag) {
- .file => return @fieldParentPtr(File, "base", base).unload(gpa),
- .zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(gpa),
- .none => {},
- .block => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .decl => unreachable,
- .container => unreachable,
- }
- }
-
- pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 {
- switch (base.tag) {
- .container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module),
- .file => return @fieldParentPtr(File, "base", base).getSource(module),
- .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
- .none => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .block => unreachable,
- .decl => unreachable,
- }
- }
-
- /// Asserts the scope is a namespace Scope and removes the Decl from the namespace.
- pub fn removeDecl(base: *Scope, child: *Decl) void {
- switch (base.tag) {
- .container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
- .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
- .none => unreachable,
- .file => unreachable,
- .block => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .decl => unreachable,
- }
- }
-
- /// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it.
- pub fn destroy(base: *Scope, gpa: *Allocator) void {
- switch (base.tag) {
- .file => {
- const scope_file = @fieldParentPtr(File, "base", base);
- scope_file.deinit(gpa);
- gpa.destroy(scope_file);
- },
- .zir_module => {
- const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base);
- scope_zir_module.deinit(gpa);
- gpa.destroy(scope_zir_module);
+ .new => return false,
+ .failure => {
+ self.status = .new;
+ return true;
},
- .none => {
- const scope_none = @fieldParentPtr(None, "base", base);
- gpa.destroy(scope_none);
+ .success => |*success| {
+ gpa.free(success.object_path);
+ success.lock.release();
+ self.status = .new;
+ return false;
},
- .block => unreachable,
- .gen_zir => unreachable,
- .local_val => unreachable,
- .local_ptr => unreachable,
- .decl => unreachable,
- .container => unreachable,
}
}
- fn name_hash_hash(x: NameHash) u32 {
- return @truncate(u32, @bitCast(u128, x));
- }
-
- fn name_hash_eql(a: NameHash, b: NameHash) bool {
- return @bitCast(u128, a) == @bitCast(u128, b);
+ pub fn destroy(self: *CObject, gpa: *Allocator) void {
+ _ = self.clearStatus(gpa);
+ self.arena.promote(gpa).deinit();
}
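The new `clearStatus` above is a small state machine: `.new` and `.failure` carry nothing, while `.success` owns an object path and a cache lock that must be released before the status can be reset. A self-contained sketch of that shape (the `Lock` type is a stand-in for the real cache manifest lock):

const std = @import("std");

const Lock = struct {
    fn release(self: *Lock) void {
        _ = self; // a real lock would unlock the cache manifest here
    }
};

const Status = union(enum) {
    new: void,
    failure: void,
    success: struct {
        object_path: []const u8,
        lock: Lock,
    },
};

// Returns true if a previous failure needs cleanup by the caller.
fn clearStatus(status: *Status, gpa: *std.mem.Allocator) bool {
    switch (status.*) {
        .new => return false,
        .failure => {
            status.* = .new;
            return true;
        },
        .success => |*success| {
            gpa.free(success.object_path);
            success.lock.release();
            status.* = .new;
            return false;
        },
    }
}

test "clearing a success frees the object path" {
    const path = try std.testing.allocator.dupe(u8, "zig-cache/o/123/foo.o");
    var status = Status{ .success = .{ .object_path = path, .lock = .{} } };
    std.debug.assert(!clearStatus(&status, std.testing.allocator));
    std.debug.assert(!clearStatus(&status, std.testing.allocator)); // now .new
}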
-
- pub const Tag = enum {
- /// .zir source code.
- zir_module,
- /// .zig source code.
- file,
- /// There is no .zig or .zir source code being compiled in this Module.
- none,
- /// struct, enum or union; every .file contains one of these.
- container,
- block,
- decl,
- gen_zir,
- local_val,
- local_ptr,
- };
-
- pub const Container = struct {
- pub const base_tag: Tag = .container;
- base: Scope = Scope{ .tag = base_tag },
-
- file_scope: *Scope.File,
-
- /// Direct children of the file.
- decls: std.AutoArrayHashMapUnmanaged(*Decl, void),
-
- // TODO implement container types and put this in a status union
- // ty: Type
-
- pub fn deinit(self: *Container, gpa: *Allocator) void {
- self.decls.deinit(gpa);
- self.* = undefined;
- }
-
- pub fn removeDecl(self: *Container, child: *Decl) void {
- _ = self.decls.remove(child);
- }
-
- pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash {
- // TODO container scope qualified names.
- return std.zig.hashSrc(name);
- }
- };
-
- pub const File = struct {
- pub const base_tag: Tag = .file;
- base: Scope = Scope{ .tag = base_tag },
-
- /// Relative to the owning package's root_src_dir.
- /// Reference to external memory, not owned by File.
- sub_file_path: []const u8,
- source: union(enum) {
- unloaded: void,
- bytes: [:0]const u8,
- },
- contents: union {
- not_available: void,
- tree: *ast.Tree,
- },
- status: enum {
- never_loaded,
- unloaded_success,
- unloaded_parse_failure,
- loaded_success,
- },
-
- root_container: Container,
-
- pub fn unload(self: *File, gpa: *Allocator) void {
- switch (self.status) {
- .never_loaded,
- .unloaded_parse_failure,
- .unloaded_success,
- => {},
-
- .loaded_success => {
- self.contents.tree.deinit();
- self.status = .unloaded_success;
- },
- }
- switch (self.source) {
- .bytes => |bytes| {
- gpa.free(bytes);
- self.source = .{ .unloaded = {} };
- },
- .unloaded => {},
- }
- }
-
- pub fn deinit(self: *File, gpa: *Allocator) void {
- self.root_container.deinit(gpa);
- self.unload(gpa);
- self.* = undefined;
- }
-
- pub fn dumpSrc(self: *File, src: usize) void {
- const loc = std.zig.findLineColumn(self.source.bytes, src);
- std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
- }
-
- pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
- switch (self.source) {
- .unloaded => {
- const source = try module.root_pkg.?.root_src_directory.handle.readFileAllocOptions(
- module.gpa,
- self.sub_file_path,
- std.math.maxInt(u32),
- null,
- 1,
- 0,
- );
- self.source = .{ .bytes = source };
- return source;
- },
- .bytes => |bytes| return bytes,
- }
- }
- };
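`getSource` is a lazy loader: the first call reads the file into memory and flips the `source` union from `.unloaded` to `.bytes`; every later call returns the cached slice, and `unload` frees it again. A reduced sketch of that caching shape, reusing the same `readFileAllocOptions` call as the code above:

const std = @import("std");

const LazySource = struct {
    sub_file_path: []const u8,
    source: union(enum) {
        unloaded: void,
        bytes: [:0]const u8,
    } = .{ .unloaded = {} },

    fn get(self: *LazySource, dir: std.fs.Dir, gpa: *std.mem.Allocator) ![:0]const u8 {
        switch (self.source) {
            .unloaded => {
                // Null-terminated, max 4 GiB, natural alignment.
                const bytes = try dir.readFileAllocOptions(gpa, self.sub_file_path, std.math.maxInt(u32), null, 1, 0);
                self.source = .{ .bytes = bytes };
                return bytes;
            },
            .bytes => |bytes| return bytes,
        }
    }

    fn unload(self: *LazySource, gpa: *std.mem.Allocator) void {
        switch (self.source) {
            .bytes => |bytes| {
                gpa.free(bytes);
                self.source = .{ .unloaded = {} };
            },
            .unloaded => {},
        }
    }
};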
-
- /// For when there is no top level scope because there are no .zig files being compiled.
- pub const None = struct {
- pub const base_tag: Tag = .none;
- base: Scope = Scope{ .tag = base_tag },
- };
-
- pub const ZIRModule = struct {
- pub const base_tag: Tag = .zir_module;
- base: Scope = Scope{ .tag = base_tag },
- /// Relative to the owning package's root_src_dir.
- /// Reference to external memory, not owned by ZIRModule.
- sub_file_path: []const u8,
- source: union(enum) {
- unloaded: void,
- bytes: [:0]const u8,
- },
- contents: union {
- not_available: void,
- module: *zir.Module,
- },
- status: enum {
- never_loaded,
- unloaded_success,
- unloaded_parse_failure,
- unloaded_sema_failure,
-
- loaded_sema_failure,
- loaded_success,
- },
-
- /// Even though .zir files only have 1 module, this set is still needed
- /// because of anonymous Decls, which can exist in the global set, but
- /// not this one.
- decls: ArrayListUnmanaged(*Decl),
-
- pub fn unload(self: *ZIRModule, gpa: *Allocator) void {
- switch (self.status) {
- .never_loaded,
- .unloaded_parse_failure,
- .unloaded_sema_failure,
- .unloaded_success,
- => {},
-
- .loaded_success => {
- self.contents.module.deinit(gpa);
- gpa.destroy(self.contents.module);
- self.contents = .{ .not_available = {} };
- self.status = .unloaded_success;
- },
- .loaded_sema_failure => {
- self.contents.module.deinit(gpa);
- gpa.destroy(self.contents.module);
- self.contents = .{ .not_available = {} };
- self.status = .unloaded_sema_failure;
- },
- }
- switch (self.source) {
- .bytes => |bytes| {
- gpa.free(bytes);
- self.source = .{ .unloaded = {} };
- },
- .unloaded => {},
- }
- }
-
- pub fn deinit(self: *ZIRModule, gpa: *Allocator) void {
- self.decls.deinit(gpa);
- self.unload(gpa);
- self.* = undefined;
- }
-
- pub fn removeDecl(self: *ZIRModule, child: *Decl) void {
- for (self.decls.items) |item, i| {
- if (item == child) {
- _ = self.decls.swapRemove(i);
- return;
- }
- }
- }
-
- pub fn dumpSrc(self: *ZIRModule, src: usize) void {
- const loc = std.zig.findLineColumn(self.source.bytes, src);
- std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
- }
-
- pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
- switch (self.source) {
- .unloaded => {
- const source = try module.root_pkg.?.root_src_directory.handle.readFileAllocOptions(
- module.gpa,
- self.sub_file_path,
- std.math.maxInt(u32),
- null,
- 1,
- 0,
- );
- self.source = .{ .bytes = source };
- return source;
- },
- .bytes => |bytes| return bytes,
- }
- }
-
- pub fn fullyQualifiedNameHash(self: *ZIRModule, name: []const u8) NameHash {
- // ZIR modules only have 1 file with all decls global in the same namespace.
- return std.zig.hashSrc(name);
- }
- };
-
- /// This is a temporary structure; references to it are valid only
- /// during semantic analysis of the block.
- pub const Block = struct {
- pub const base_tag: Tag = .block;
- base: Scope = Scope{ .tag = base_tag },
- parent: ?*Block,
- func: ?*Fn,
- decl: *Decl,
- instructions: ArrayListUnmanaged(*Inst),
- /// Points to the arena allocator of DeclAnalysis
- arena: *Allocator,
- label: ?Label = null,
- is_comptime: bool,
-
- pub const Label = struct {
- zir_block: *zir.Inst.Block,
- results: ArrayListUnmanaged(*Inst),
- block_inst: *Inst.Block,
- };
- };
-
- /// This is a temporary structure; references to it are valid only
- /// during semantic analysis of the decl.
- pub const DeclAnalysis = struct {
- pub const base_tag: Tag = .decl;
- base: Scope = Scope{ .tag = base_tag },
- decl: *Decl,
- arena: std.heap.ArenaAllocator,
- };
-
- /// This is a temporary structure; references to it are valid only
- /// during semantic analysis of the decl.
- pub const GenZIR = struct {
- pub const base_tag: Tag = .gen_zir;
- base: Scope = Scope{ .tag = base_tag },
- /// Parents can be: `GenZIR`, `ZIRModule`, `File`
- parent: *Scope,
- decl: *Decl,
- arena: *Allocator,
- /// The first N instructions in a function body ZIR are arg instructions.
- instructions: std.ArrayListUnmanaged(*zir.Inst) = .{},
- label: ?Label = null,
-
- pub const Label = struct {
- token: ast.TokenIndex,
- block_inst: *zir.Inst.Block,
- result_loc: astgen.ResultLoc,
- };
- };
-
- /// This is always a `const` local and, importantly, the `inst` is a value type, not a pointer.
- /// This structure lives as long as the AST generation of the Block
- /// node that contains the variable.
- pub const LocalVal = struct {
- pub const base_tag: Tag = .local_val;
- base: Scope = Scope{ .tag = base_tag },
- /// Parents can be: `LocalVal`, `LocalPtr`, `GenZIR`.
- parent: *Scope,
- gen_zir: *GenZIR,
- name: []const u8,
- inst: *zir.Inst,
- };
-
- /// This could be a `const` or `var` local. It has a pointer instead of a value.
- /// This structure lives as long as the AST generation of the Block
- /// node that contains the variable.
- pub const LocalPtr = struct {
- pub const base_tag: Tag = .local_ptr;
- base: Scope = Scope{ .tag = base_tag },
- /// Parents can be: `LocalVal`, `LocalPtr`, `GenZIR`.
- parent: *Scope,
- gen_zir: *GenZIR,
- name: []const u8,
- ptr: *zir.Inst,
- };
};
pub const AllErrors = struct {
@@ -1029,12 +253,12 @@ pub const InitOptions = struct {
debug_link: bool = false,
stack_size_override: ?u64 = null,
self_exe_path: ?[]const u8 = null,
- version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 },
+ version: ?std.builtin.Version = null,
libc_installation: ?*const LibCInstallation = null,
};
pub fn create(gpa: *Allocator, options: InitOptions) !*Module {
- const mod: *Module = mod: {
+ const comp: *Module = comp: {
// For allocations that have the same lifetime as Module. This arena is used only during this
// initialization and then is freed in deinit().
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
@@ -1043,7 +267,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Module {
// We put the `Module` itself in the arena. Freeing the arena will free the module.
// It's initialized later after we prepare the initialization options.
- const mod = try arena.create(Module);
+ const comp = try arena.create(Module);
const root_name = try arena.dupe(u8, options.root_name);
const ofmt = options.object_format orelse options.target.getObjectFormat();
@@ -1153,75 +377,142 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Module {
// For example, one cannot change the target between updates, but one can change source files,
// so the target goes into the cache hash, but source files do not. This is so that we can
// find the same binary and incrementally update it even if there are modified source files.
- // We do this even if outputting to the current directory because (1) this cache_hash instance
- // will be the "parent" of other cache_hash instances such as for C objects, (2) we need
- // a place for intermediate build artifacts, such as a .o file to be linked with LLD, and (3)
- // we need somewhere to store serialization of incremental compilation metadata.
- var cache = try std.cache_hash.CacheHash.init(gpa, options.zig_cache_directory.handle, "h");
- errdefer cache.release();
-
- // Now we will prepare hash state initializations to avoid redundantly computing hashes.
- // First we add common things between things that apply to zig source and all c source files.
- cache.addBytes(build_options.version);
- cache.add(options.optimize_mode);
- cache.add(options.target.cpu.arch);
- cache.addBytes(options.target.cpu.model.name);
- cache.add(options.target.cpu.features.ints);
- cache.add(options.target.os.tag);
- switch (options.target.os.tag) {
- .linux => {
- cache.add(options.target.os.version_range.linux.range.min);
- cache.add(options.target.os.version_range.linux.range.max);
- cache.add(options.target.os.version_range.linux.glibc);
- },
- .windows => {
- cache.add(options.target.os.version_range.windows.min);
- cache.add(options.target.os.version_range.windows.max);
- },
- .freebsd,
- .macosx,
- .ios,
- .tvos,
- .watchos,
- .netbsd,
- .openbsd,
- .dragonfly,
- => {
- cache.add(options.target.os.version_range.semver.min);
- cache.add(options.target.os.version_range.semver.max);
- },
- else => {},
- }
- cache.add(options.target.abi);
- cache.add(ofmt);
- cache.add(pic);
- cache.add(stack_check);
- cache.add(sanitize_c);
- cache.add(valgrind);
- cache.add(link_mode);
- cache.add(options.strip);
- cache.add(single_threaded);
+ // We do this even if outputting to the current directory because we need somewhere to store
+ // incremental compilation metadata.
+ const cache = try arena.create(std.cache_hash.Cache);
+ cache.* = .{
+ .gpa = gpa,
+ .manifest_dir = try options.zig_cache_directory.handle.makeOpenPath("h", .{}),
+ };
+ errdefer cache.manifest_dir.close();
+
+ // This is shared hasher state common to zig source and all C source files.
+ cache.hash.addBytes(build_options.version);
+ cache.hash.addBytes(options.zig_lib_directory.path orelse ".");
+ cache.hash.add(options.optimize_mode);
+ cache.hash.add(options.target.cpu.arch);
+ cache.hash.addBytes(options.target.cpu.model.name);
+ cache.hash.add(options.target.cpu.features.ints);
+ cache.hash.add(options.target.os.tag);
+ cache.hash.add(options.target.abi);
+ cache.hash.add(ofmt);
+ cache.hash.add(pic);
+ cache.hash.add(stack_check);
+ cache.hash.add(link_mode);
+ cache.hash.add(options.strip);
+ cache.hash.add(options.link_libc);
+ cache.hash.add(options.output_mode);
// TODO audit this and make sure everything is in it
- // We don't care whether we find something there, just show us the digest.
- const digest = (try cache.hit()) orelse cache.final();
-
- const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
- var artifact_dir = try options.zig_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
- errdefer artifact_dir.close();
- const zig_cache_artifact_directory: Directory = .{
- .handle = artifact_dir,
- .path = if (options.zig_cache_directory.path) |p|
- try std.fs.path.join(arena, &[_][]const u8{ p, artifact_sub_dir })
- else
- artifact_sub_dir,
+ const zig_module: ?*ZigModule = if (options.root_pkg) |root_pkg| blk: {
+ // Options that are specific to zig source files, that cannot be
+ // modified between incremental updates.
+ var hash = cache.hash;
+
+ hash.add(valgrind);
+ hash.add(single_threaded);
+ switch (options.target.os.getVersionRange()) {
+ .linux => |linux| {
+ hash.add(linux.range.min);
+ hash.add(linux.range.max);
+ hash.add(linux.glibc);
+ },
+ .windows => |windows| {
+ hash.add(windows.min);
+ hash.add(windows.max);
+ },
+ .semver => |semver| {
+ hash.add(semver.min);
+ hash.add(semver.max);
+ },
+ .none => {},
+ }
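Note the `var hash = cache.hash;` above: the hasher is a value type, so copying it forks the shared state (version, target, flags) and lets the Zig-module hash append its own inputs without disturbing the parent state that C objects later fork from via `obtain()`. The same pattern with a plain std hasher, as a sketch:

const std = @import("std");

test "forking shared hasher state" {
    var parent = std.hash.Wyhash.init(0);
    parent.update("shared: version, target, optimize mode");

    var zig_hash = parent; // value copy; parent is left untouched
    zig_hash.update("zig-only: valgrind, single_threaded");

    var c_hash = parent;
    c_hash.update("c-only: clang_argv, libc include dirs");

    // Distinct inputs yield distinct digests (with overwhelming probability).
    std.debug.assert(zig_hash.final() != c_hash.final());
}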
+
+ const digest = hash.final();
+ const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
+ var artifact_dir = try options.zig_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
+ errdefer artifact_dir.close();
+ const zig_cache_artifact_directory: Directory = .{
+ .handle = artifact_dir,
+ .path = if (options.zig_cache_directory.path) |p|
+ try std.fs.path.join(arena, &[_][]const u8{ p, artifact_sub_dir })
+ else
+ artifact_sub_dir,
+ };
+
+ // TODO when we implement serialization and deserialization of incremental compilation metadata,
+ // this is where we would load it. We have an open handle to the directory where
+ // the output either already is, or will be.
+ // However, we currently do not have serialization of such metadata, so for now
+ // we set up an empty ZigModule that performs the entire compilation fresh.
+
+ const root_scope = rs: {
+ if (mem.endsWith(u8, root_pkg.root_src_path, ".zig")) {
+ const root_scope = try gpa.create(ZigModule.Scope.File);
+ root_scope.* = .{
+ .sub_file_path = root_pkg.root_src_path,
+ .source = .{ .unloaded = {} },
+ .contents = .{ .not_available = {} },
+ .status = .never_loaded,
+ .root_container = .{
+ .file_scope = root_scope,
+ .decls = .{},
+ },
+ };
+ break :rs &root_scope.base;
+ } else if (mem.endsWith(u8, root_pkg.root_src_path, ".zir")) {
+ const root_scope = try gpa.create(ZigModule.Scope.ZIRModule);
+ root_scope.* = .{
+ .sub_file_path = root_pkg.root_src_path,
+ .source = .{ .unloaded = {} },
+ .contents = .{ .not_available = {} },
+ .status = .never_loaded,
+ .decls = .{},
+ };
+ break :rs &root_scope.base;
+ } else {
+ unreachable;
+ }
+ };
+
+ const zig_module = try arena.create(ZigModule);
+ zig_module.* = .{
+ .gpa = gpa,
+ .comp = comp,
+ .root_pkg = root_pkg,
+ .root_scope = root_scope,
+ .zig_cache_artifact_directory = zig_cache_artifact_directory,
+ };
+ break :blk zig_module;
+ } else null;
+ errdefer if (zig_module) |zm| zm.deinit();
+
+ // For resource management purposes.
+ var owned_link_dir: ?std.fs.Dir = null;
+ errdefer if (owned_link_dir) |*dir| dir.close();
+
+ const bin_directory = emit_bin.directory orelse blk: {
+ if (zig_module) |zm| break :blk zm.zig_cache_artifact_directory;
+
+ const digest = cache.hash.peek();
+ const artifact_sub_dir = try std.fs.path.join(arena, &[_][]const u8{ "o", &digest });
+ var artifact_dir = try options.zig_cache_directory.handle.makeOpenPath(artifact_sub_dir, .{});
+ owned_link_dir = artifact_dir;
+ const link_artifact_directory: Directory = .{
+ .handle = artifact_dir,
+ .path = if (options.zig_cache_directory.path) |p|
+ try std.fs.path.join(arena, &[_][]const u8{ p, artifact_sub_dir })
+ else
+ artifact_sub_dir,
+ };
+ break :blk link_artifact_directory;
};
const bin_file = try link.File.openPath(gpa, .{
- .directory = emit_bin.directory orelse zig_cache_artifact_directory,
+ .directory = bin_directory,
.sub_path = emit_bin.basename,
.root_name = root_name,
- .root_pkg = options.root_pkg,
+ .zig_module = zig_module,
.target = options.target,
.dynamic_linker = options.dynamic_linker,
.output_mode = options.output_mode,
@@ -1263,70 +554,33 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Module {
});
errdefer bin_file.destroy();
- // We arena-allocate the root scope so there is no free needed.
- const root_scope = blk: {
- if (options.root_pkg) |root_pkg| {
- if (mem.endsWith(u8, root_pkg.root_src_path, ".zig")) {
- const root_scope = try gpa.create(Scope.File);
- root_scope.* = .{
- .sub_file_path = root_pkg.root_src_path,
- .source = .{ .unloaded = {} },
- .contents = .{ .not_available = {} },
- .status = .never_loaded,
- .root_container = .{
- .file_scope = root_scope,
- .decls = .{},
- },
- };
- break :blk &root_scope.base;
- } else if (mem.endsWith(u8, root_pkg.root_src_path, ".zir")) {
- const root_scope = try gpa.create(Scope.ZIRModule);
- root_scope.* = .{
- .sub_file_path = root_pkg.root_src_path,
- .source = .{ .unloaded = {} },
- .contents = .{ .not_available = {} },
- .status = .never_loaded,
- .decls = .{},
- };
- break :blk &root_scope.base;
- } else {
- unreachable;
- }
- } else {
- const root_scope = try gpa.create(Scope.None);
- root_scope.* = .{};
- break :blk &root_scope.base;
- }
- };
-
- mod.* = .{
+ comp.* = .{
.gpa = gpa,
.arena_state = arena_allocator.state,
.zig_lib_directory = options.zig_lib_directory,
.zig_cache_directory = options.zig_cache_directory,
- .zig_cache_artifact_directory = zig_cache_artifact_directory,
- .root_pkg = options.root_pkg,
- .root_scope = root_scope,
.bin_file = bin_file,
.work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa),
.keep_source_files_loaded = options.keep_source_files_loaded,
.use_clang = use_clang,
.clang_argv = options.clang_argv,
.c_source_files = options.c_source_files,
- .cache = cache,
+ .cache_parent = cache,
.self_exe_path = options.self_exe_path,
.libc_include_dir_list = libc_dirs.libc_include_dir_list,
.sanitize_c = sanitize_c,
.rand = options.rand,
.clang_passthrough_mode = options.clang_passthrough_mode,
.debug_cc = options.debug_cc,
+ .disable_c_depfile = options.disable_c_depfile,
+ .owned_link_dir = owned_link_dir,
};
- break :mod mod;
+ break :comp comp;
};
- errdefer mod.destroy();
+ errdefer comp.destroy();
// Add a `CObject` for each `c_source_files`.
- try mod.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
+ try comp.c_object_table.ensureCapacity(gpa, options.c_source_files.len);
for (options.c_source_files) |c_source_file| {
var local_arena = std.heap.ArenaAllocator.init(gpa);
errdefer local_arena.deinit();
@@ -1335,28 +589,29 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Module {
c_object.* = .{
.status = .{ .new = {} },
- // TODO why are we duplicating this memory? do we need to?
- // look into refactoring to turn these 2 fields simply into a CSourceFile
+ // TODO look into refactoring to turn these 2 fields simply into a CSourceFile
.src_path = try local_arena.allocator.dupe(u8, c_source_file.src_path),
.extra_flags = try local_arena.allocator.dupe([]const u8, c_source_file.extra_flags),
.arena = local_arena.state,
};
- mod.c_object_table.putAssumeCapacityNoClobber(c_object, {});
+ comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
}
// If we need to build glibc for the target, add work items for it.
// We go through the work queue so that building can be done in parallel.
- if (mod.wantBuildGLibCFromSource()) {
- try mod.addBuildingGLibCWorkItems();
+ if (comp.wantBuildGLibCFromSource()) {
+ try comp.addBuildingGLibCWorkItems();
}
- return mod;
+ return comp;
}
pub fn destroy(self: *Module) void {
+ const optional_zig_module = self.bin_file.options.zig_module;
self.bin_file.destroy();
+ if (optional_zig_module) |zig_module| zig_module.deinit();
+
const gpa = self.gpa;
- self.deletion_set.deinit(gpa);
self.work_queue.deinit();
{
@@ -1368,86 +623,32 @@ pub fn destroy(self: *Module) void {
self.crt_files.deinit(gpa);
}
- for (self.decl_table.items()) |entry| {
- entry.value.destroy(gpa);
- }
- self.decl_table.deinit(gpa);
-
for (self.c_object_table.items()) |entry| {
entry.key.destroy(gpa);
}
self.c_object_table.deinit(gpa);
- for (self.failed_decls.items()) |entry| {
- entry.value.destroy(gpa);
- }
- self.failed_decls.deinit(gpa);
-
for (self.failed_c_objects.items()) |entry| {
entry.value.destroy(gpa);
}
self.failed_c_objects.deinit(gpa);
- for (self.failed_files.items()) |entry| {
- entry.value.destroy(gpa);
- }
- self.failed_files.deinit(gpa);
-
- for (self.failed_exports.items()) |entry| {
- entry.value.destroy(gpa);
- }
- self.failed_exports.deinit(gpa);
-
- for (self.decl_exports.items()) |entry| {
- const export_list = entry.value;
- gpa.free(export_list);
- }
- self.decl_exports.deinit(gpa);
-
- for (self.export_owners.items()) |entry| {
- freeExportList(gpa, entry.value);
- }
- self.export_owners.deinit(gpa);
-
- self.symbol_exports.deinit(gpa);
- self.root_scope.destroy(gpa);
-
- var it = self.global_error_set.iterator();
- while (it.next()) |entry| {
- gpa.free(entry.key);
- }
- self.global_error_set.deinit(gpa);
-
- self.zig_cache_artifact_directory.handle.close();
- self.cache.release();
+ self.cache_parent.manifest_dir.close();
+ if (self.owned_link_dir) |*dir| dir.close();
// This destroys `self`.
self.arena_state.promote(gpa).deinit();
}
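`create` allocates the `Module` inside its own arena, so `destroy` can end with `self.arena_state.promote(gpa).deinit()`: the final line frees `self` along with everything else the arena owns. A minimal sketch of that self-owning-arena pattern (the `Thing` type is illustrative):

const std = @import("std");

const Thing = struct {
    arena_state: std.heap.ArenaAllocator.State,
    gpa: *std.mem.Allocator,

    fn create(gpa: *std.mem.Allocator) !*Thing {
        var arena_allocator = std.heap.ArenaAllocator.init(gpa);
        errdefer arena_allocator.deinit();
        // The object itself lives in its own arena.
        const self = try arena_allocator.allocator.create(Thing);
        self.* = .{ .arena_state = arena_allocator.state, .gpa = gpa };
        return self;
    }

    fn destroy(self: *Thing) void {
        // Promote revives the arena; deinit frees every allocation,
        // including `self`, so no use of `self` may follow this line.
        self.arena_state.promote(self.gpa).deinit();
    }
};

test "self-owning arena" {
    const thing = try Thing.create(std.testing.allocator);
    thing.destroy();
}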
-fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
- for (export_list) |exp| {
- gpa.free(exp.options.name);
- gpa.destroy(exp);
- }
- gpa.free(export_list);
-}
-
pub fn getTarget(self: Module) Target {
return self.bin_file.options.target;
}
-pub fn optimizeMode(self: Module) std.builtin.Mode {
- return self.bin_file.options.optimize_mode;
-}
-
/// Detect changes to source files, perform semantic analysis, and update the output files.
pub fn update(self: *Module) !void {
const tracy = trace(@src());
defer tracy.end();
- self.generation += 1;
-
// For compiling C objects, we rely on the cache hash system to avoid duplicating work.
// TODO Look into caching this data in memory to improve performance.
// Add a WorkItem for each C object.
@@ -1456,36 +657,42 @@ pub fn update(self: *Module) !void {
self.work_queue.writeItemAssumeCapacity(.{ .c_object = entry.key });
}
- // TODO Detect which source files changed.
- // Until then we simulate a full cache miss. Source files could have been loaded for any reason;
- // to force a refresh we unload now.
- if (self.root_scope.cast(Scope.File)) |zig_file| {
- zig_file.unload(self.gpa);
- self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
- error.AnalysisFail => {
- assert(self.totalErrorCount() != 0);
- },
- else => |e| return e,
- };
- } else if (self.root_scope.cast(Scope.ZIRModule)) |zir_module| {
- zir_module.unload(self.gpa);
- self.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
- error.AnalysisFail => {
- assert(self.totalErrorCount() != 0);
- },
- else => |e| return e,
- };
+ if (self.bin_file.options.zig_module) |zig_module| {
+ zig_module.generation += 1;
+
+ // TODO Detect which source files changed.
+ // Until then we simulate a full cache miss. Source files could have been loaded for any reason;
+ // to force a refresh we unload now.
+ if (zig_module.root_scope.cast(ZigModule.Scope.File)) |zig_file| {
+ zig_file.unload(zig_module.gpa);
+ zig_module.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
+ error.AnalysisFail => {
+ assert(self.totalErrorCount() != 0);
+ },
+ else => |e| return e,
+ };
+ } else if (zig_module.root_scope.cast(ZigModule.Scope.ZIRModule)) |zir_module| {
+ zir_module.unload(zig_module.gpa);
+ zig_module.analyzeRootZIRModule(zir_module) catch |err| switch (err) {
+ error.AnalysisFail => {
+ assert(self.totalErrorCount() != 0);
+ },
+ else => |e| return e,
+ };
+ }
}
try self.performAllTheWork();
- // Process the deletion set.
- while (self.deletion_set.popOrNull()) |decl| {
- if (decl.dependants.items().len != 0) {
- decl.deletion_flag = false;
- continue;
+ if (self.bin_file.options.zig_module) |zig_module| {
+ // Process the deletion set.
+ while (zig_module.deletion_set.popOrNull()) |decl| {
+ if (decl.dependants.items().len != 0) {
+ decl.deletion_flag = false;
+ continue;
+ }
+ try zig_module.deleteDecl(decl);
}
- try self.deleteDecl(decl);
}
// This is needed before reading the error flags.
@@ -1496,7 +703,9 @@ pub fn update(self: *Module) !void {
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
if (self.totalErrorCount() == 0 and !self.keep_source_files_loaded) {
- self.root_scope.unload(self.gpa);
+ if (self.bin_file.options.zig_module) |zig_module| {
+ zig_module.root_scope.unload(self.gpa);
+ }
}
}
@@ -1513,11 +722,20 @@ pub fn makeBinFileWritable(self: *Module) !void {
}
pub fn totalErrorCount(self: *Module) usize {
- const total = self.failed_decls.items().len +
- self.failed_c_objects.items().len +
- self.failed_files.items().len +
- self.failed_exports.items().len;
- return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total;
+ var total: usize = self.failed_c_objects.items().len;
+
+ if (self.bin_file.options.zig_module) |zig_module| {
+ total += zig_module.failed_decls.items().len +
+ zig_module.failed_exports.items().len +
+ zig_module.failed_files.items().len;
+ }
+
+ // The "no entry point found" error only counts if there are no other errors.
+ if (total == 0) {
+ return @boolToInt(self.link_error_flags.no_entry_point_found);
+ }
+
+ return total;
}
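The rewrite above makes the precedence explicit: per-item failures are summed first, and the link-level "no entry point found" flag contributes a single error only when that sum is zero. In miniature:

const std = @import("std");

test "link flag only counts when nothing else failed" {
    const no_entry_point_found = true;
    const failed_c_objects: usize = 0;

    const total = if (failed_c_objects == 0)
        @as(usize, @boolToInt(no_entry_point_found))
    else
        failed_c_objects;
    std.debug.assert(total == 1);
}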
pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
@@ -1530,31 +748,32 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
for (self.failed_c_objects.items()) |entry| {
const c_object = entry.key;
const err_msg = entry.value;
- const source = c_object.status.failure;
- try AllErrors.add(&arena, &errors, c_object.src_path, source, err_msg.*);
- }
- for (self.failed_files.items()) |entry| {
- const scope = entry.key;
- const err_msg = entry.value;
- const source = try scope.getSource(self);
- try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
- }
- for (self.failed_decls.items()) |entry| {
- const decl = entry.key;
- const err_msg = entry.value;
- const source = try decl.scope.getSource(self);
- try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
- }
- for (self.failed_exports.items()) |entry| {
- const decl = entry.key.owner_decl;
- const err_msg = entry.value;
- const source = try decl.scope.getSource(self);
- try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
+ try AllErrors.add(&arena, &errors, c_object.src_path, "", err_msg.*);
+ }
+ if (self.bin_file.options.zig_module) |zig_module| {
+ for (zig_module.failed_files.items()) |entry| {
+ const scope = entry.key;
+ const err_msg = entry.value;
+ const source = try scope.getSource(zig_module);
+ try AllErrors.add(&arena, &errors, scope.subFilePath(), source, err_msg.*);
+ }
+ for (zig_module.failed_decls.items()) |entry| {
+ const decl = entry.key;
+ const err_msg = entry.value;
+ const source = try decl.scope.getSource(zig_module);
+ try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
+ }
+ for (zig_module.failed_exports.items()) |entry| {
+ const decl = entry.key.owner_decl;
+ const err_msg = entry.value;
+ const source = try decl.scope.getSource(zig_module);
+ try AllErrors.add(&arena, &errors, decl.scope.subFilePath(), source, err_msg.*);
+ }
}
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
const global_err_src_path = blk: {
- if (self.root_pkg) |root_pkg| break :blk root_pkg.root_src_path;
+ if (self.bin_file.options.zig_module) |zig_module| break :blk zig_module.root_pkg.root_src_path;
if (self.c_source_files.len != 0) break :blk self.c_source_files[0].src_path;
if (self.bin_file.options.objects.len != 0) break :blk self.bin_file.options.objects[0];
break :blk "(no file)";
@@ -1590,9 +809,10 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
=> continue,
.complete, .codegen_failure_retryable => {
+ const zig_module = self.bin_file.options.zig_module.?;
if (decl.typed_value.most_recent.typed_value.val.cast(Value.Payload.Function)) |payload| {
switch (payload.func.analysis) {
- .queued => self.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
+ .queued => zig_module.analyzeFnBody(decl, payload.func) catch |err| switch (err) {
error.AnalysisFail => {
assert(payload.func.analysis != .in_progress);
continue;
@@ -1605,23 +825,23 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
}
// Here we tack on additional allocations to the Decl's arena. The allocations are
// lifetime annotations in the ZIR.
- var decl_arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
+ var decl_arena = decl.typed_value.most_recent.arena.?.promote(zig_module.gpa);
defer decl.typed_value.most_recent.arena.?.* = decl_arena.state;
log.debug("analyze liveness of {}\n", .{decl.name});
- try liveness.analyze(self.gpa, &decl_arena.allocator, payload.func.analysis.success);
+ try liveness.analyze(zig_module.gpa, &decl_arena.allocator, payload.func.analysis.success);
}
assert(decl.typed_value.most_recent.typed_value.ty.hasCodeGenBits());
- self.bin_file.updateDecl(self, decl) catch |err| switch (err) {
+ self.bin_file.updateDecl(zig_module, decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => {
decl.analysis = .dependency_failure;
},
else => {
- try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
- self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
- self.gpa,
+ try zig_module.failed_decls.ensureCapacity(zig_module.gpa, zig_module.failed_decls.items().len + 1);
+ zig_module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ zig_module.gpa,
decl.src(),
"unable to codegen: {}",
.{@errorName(err)},
@@ -1632,16 +852,18 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
},
},
.analyze_decl => |decl| {
- self.ensureDeclAnalyzed(decl) catch |err| switch (err) {
+ const zig_module = self.bin_file.options.zig_module.?;
+ zig_module.ensureDeclAnalyzed(decl) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => continue,
};
},
.update_line_number => |decl| {
- self.bin_file.updateDeclLineNumber(self, decl) catch |err| {
- try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
- self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
- self.gpa,
+ const zig_module = self.bin_file.options.zig_module.?;
+ self.bin_file.updateDeclLineNumber(zig_module, decl) catch |err| {
+ try zig_module.failed_decls.ensureCapacity(zig_module.gpa, zig_module.failed_decls.items().len + 1);
+ zig_module.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
+ zig_module.gpa,
decl.src(),
"unable to update line number: {}",
.{@errorName(err)},
@@ -1650,21 +872,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
};
},
.c_object => |c_object| {
- // Free the previous attempt.
- switch (c_object.status) {
- .new => {},
- .success => |o_file_path| {
- self.gpa.free(o_file_path);
- c_object.status = .{ .new = {} };
- },
- .failure => |source| {
- self.failed_c_objects.removeAssertDiscard(c_object);
- self.gpa.free(source);
-
- c_object.status = .{ .new = {} };
- },
- }
- self.buildCObject(c_object) catch |err| switch (err) {
+ self.updateCObject(c_object) catch |err| switch (err) {
error.AnalysisFail => continue,
else => {
try self.failed_c_objects.ensureCapacity(self.gpa, self.failed_c_objects.items().len + 1);
@@ -1674,7 +882,7 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
"unable to build C object: {}",
.{@errorName(err)},
));
- c_object.status = .{ .failure = "" };
+ c_object.status = .{ .failure = {} };
},
};
},
@@ -1692,127 +900,175 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
};
}
-fn buildCObject(mod: *Module, c_object: *CObject) !void {
+fn updateCObject(comp: *Compilation, c_object: *CObject) !void {
const tracy = trace(@src());
defer tracy.end();
- // TODO this C source file needs its own cache hash instance
-
if (!build_options.have_llvm) {
- return mod.failCObj(c_object, "clang not available: compiler not built with LLVM extensions enabled", .{});
+ return comp.failCObj(c_object, "clang not available: compiler not built with LLVM extensions enabled", .{});
+ }
+ const self_exe_path = comp.self_exe_path orelse
+ return comp.failCObj(c_object, "clang compilation disabled", .{});
+
+ if (c_object.clearStatus(comp.gpa)) {
+ // There was previous failure.
+ comp.failed_c_objects.removeAssertDiscard(c_object);
+ }
+
+ var ch = comp.cache_parent.obtain();
+ defer ch.deinit();
+
+ ch.hash.add(comp.sanitize_c);
+ ch.hash.addListOfBytes(comp.clang_argv);
+ ch.hash.add(comp.bin_file.options.link_libcpp);
+ ch.hash.addListOfBytes(comp.libc_include_dir_list);
+ // TODO
+ //cache_int(cache_hash, g->code_model);
+ //cache_bool(cache_hash, codegen_have_frame_pointer(g));
+ _ = try ch.addFile(c_object.src_path, null);
+ {
+ // Hash the extra flags, with special care to call addFile for file parameters.
+ // TODO this logic can likely be improved by utilizing clang_options_data.zig.
+ const file_args = [_][]const u8{"-include"};
+ var arg_i: usize = 0;
+ while (arg_i < c_object.extra_flags.len) : (arg_i += 1) {
+ const arg = c_object.extra_flags[arg_i];
+ ch.hash.addBytes(arg);
+ for (file_args) |file_arg| {
+ if (mem.eql(u8, file_arg, arg) and arg_i + 1 < c_object.extra_flags.len) {
+ arg_i += 1;
+ _ = try ch.addFile(c_object.extra_flags[arg_i], null);
+ }
+ }
+ }
}
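The loop above treats `-include` as a flag whose next argument names a file, so that file's contents (via `addFile`), not just its spelling, enter the cache hash; otherwise editing an included header would fail to invalidate the cached object. A runnable reduction of the classification logic, where counting stands in for the `ch.addFile` call:

const std = @import("std");

test "file-valued flag detection" {
    const file_args = [_][]const u8{"-include"};
    const flags = [_][]const u8{ "-include", "pre.h", "-DFOO" };
    var arg_i: usize = 0;
    var files_hashed: usize = 0;
    while (arg_i < flags.len) : (arg_i += 1) {
        for (file_args) |file_arg| {
            if (std.mem.eql(u8, file_arg, flags[arg_i]) and arg_i + 1 < flags.len) {
                arg_i += 1;
                files_hashed += 1; // would be: _ = try ch.addFile(flags[arg_i], null);
            }
        }
    }
    std.debug.assert(files_hashed == 1);
}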
- const self_exe_path = mod.self_exe_path orelse
- return mod.failCObj(c_object, "clang compilation disabled", .{});
- var arena_allocator = std.heap.ArenaAllocator.init(mod.gpa);
+ var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
defer arena_allocator.deinit();
const arena = &arena_allocator.allocator;
- var argv = std.ArrayList([]const u8).init(mod.gpa);
- defer argv.deinit();
-
const c_source_basename = std.fs.path.basename(c_object.src_path);
// Special case when doing build-obj for just one C file. When there is more than one object
// file and we are building an object, we need to link them together, but with just one it
// should go directly to the output file.
- const direct_o = mod.c_source_files.len == 1 and mod.root_pkg == null and
- mod.bin_file.options.output_mode == .Obj and mod.bin_file.options.objects.len == 0;
+ const direct_o = comp.c_source_files.len == 1 and comp.bin_file.options.zig_module == null and
+ comp.bin_file.options.output_mode == .Obj and comp.bin_file.options.objects.len == 0;
const o_basename_noext = if (direct_o)
- mod.bin_file.options.root_name
+ comp.bin_file.options.root_name
else
mem.split(c_source_basename, ".").next().?;
- const o_basename = try std.fmt.allocPrint(arena, "{}{}", .{ o_basename_noext, mod.getTarget().oFileExt() });
+ const o_basename = try std.fmt.allocPrint(arena, "{}{}", .{ o_basename_noext, comp.getTarget().oFileExt() });
- // We can't know the digest until we do the C compiler invocation, so we need a temporary filename.
- const out_obj_path = try mod.tmpFilePath(arena, o_basename);
+ const full_object_path = if (!(try ch.hit()) or comp.disable_c_depfile) blk: {
+ var argv = std.ArrayList([]const u8).init(comp.gpa);
+ defer argv.deinit();
- var zig_cache_tmp_dir = try mod.zig_cache_directory.handle.makeOpenPath("tmp", .{});
- defer zig_cache_tmp_dir.close();
+ // We can't know the digest until we do the C compiler invocation, so we need a temporary filename.
+ const out_obj_path = try comp.tmpFilePath(arena, o_basename);
- try argv.appendSlice(&[_][]const u8{ self_exe_path, "clang", "-c" });
+ try argv.appendSlice(&[_][]const u8{ self_exe_path, "clang", "-c" });
- const ext = classifyFileExt(c_object.src_path);
- // TODO capture the .d file and deal with caching stuff
- try mod.addCCArgs(arena, &argv, ext, false, null);
+ const ext = classifyFileExt(c_object.src_path);
+ // TODO capture the .d file and deal with caching stuff
+ try comp.addCCArgs(arena, &argv, ext, false, null);
- try argv.append("-o");
- try argv.append(out_obj_path);
+ try argv.append("-o");
+ try argv.append(out_obj_path);
- try argv.append(c_object.src_path);
- try argv.appendSlice(c_object.extra_flags);
+ try argv.append(c_object.src_path);
+ try argv.appendSlice(c_object.extra_flags);
- if (mod.debug_cc) {
- for (argv.items[0 .. argv.items.len - 1]) |arg| {
- std.debug.print("{} ", .{arg});
+ if (comp.debug_cc) {
+ for (argv.items[0 .. argv.items.len - 1]) |arg| {
+ std.debug.print("{} ", .{arg});
+ }
+ std.debug.print("{}\n", .{argv.items[argv.items.len - 1]});
}
- std.debug.print("{}\n", .{argv.items[argv.items.len - 1]});
- }
- const child = try std.ChildProcess.init(argv.items, arena);
- defer child.deinit();
+ const child = try std.ChildProcess.init(argv.items, arena);
+ defer child.deinit();
- if (mod.clang_passthrough_mode) {
- child.stdin_behavior = .Inherit;
- child.stdout_behavior = .Inherit;
- child.stderr_behavior = .Inherit;
+ if (comp.clang_passthrough_mode) {
+ child.stdin_behavior = .Inherit;
+ child.stdout_behavior = .Inherit;
+ child.stderr_behavior = .Inherit;
- const term = child.spawnAndWait() catch |err| {
- return mod.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) });
- };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO make std.process.exit and std.ChildProcess exit code have the same type
- // and forward it here. Currently it is u32 vs u8.
- std.process.exit(1);
- }
- },
- else => std.process.exit(1),
- }
- } else {
- child.stdin_behavior = .Ignore;
- child.stdout_behavior = .Pipe;
- child.stderr_behavior = .Pipe;
+ const term = child.spawnAndWait() catch |err| {
+ return comp.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) });
+ };
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO make std.process.exit and std.ChildProcess exit code have the same type
+ // and forward it here. Currently it is u32 vs u8.
+ std.process.exit(1);
+ }
+ },
+ else => std.process.exit(1),
+ }
+ } else {
+ child.stdin_behavior = .Ignore;
+ child.stdout_behavior = .Pipe;
+ child.stderr_behavior = .Pipe;
- try child.spawn();
+ try child.spawn();
- const stdout_reader = child.stdout.?.reader();
- const stderr_reader = child.stderr.?.reader();
+ const stdout_reader = child.stdout.?.reader();
+ const stderr_reader = child.stderr.?.reader();
- // TODO Need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
- const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
- const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
+ // TODO Need to poll to read these streams to prevent a deadlock (or rely on evented I/O).
+ const stdout = try stdout_reader.readAllAlloc(arena, std.math.maxInt(u32));
+ const stderr = try stderr_reader.readAllAlloc(arena, 10 * 1024 * 1024);
- const term = child.wait() catch |err| {
- return mod.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) });
- };
+ const term = child.wait() catch |err| {
+ return comp.failCObj(c_object, "unable to spawn {}: {}", .{ argv.items[0], @errorName(err) });
+ };
- switch (term) {
- .Exited => |code| {
- if (code != 0) {
- // TODO parse clang stderr and turn it into an error message
- // and then call failCObjWithOwnedErrorMsg
- std.log.err("clang failed with stderr: {}", .{stderr});
- return mod.failCObj(c_object, "clang exited with code {}", .{code});
- }
- },
- else => {
- std.log.err("clang terminated with stderr: {}", .{stderr});
- return mod.failCObj(c_object, "clang terminated unexpectedly", .{});
- },
+ switch (term) {
+ .Exited => |code| {
+ if (code != 0) {
+ // TODO parse clang stderr and turn it into an error message
+ // and then call failCObjWithOwnedErrorMsg
+ std.log.err("clang failed with stderr: {}", .{stderr});
+ return comp.failCObj(c_object, "clang exited with code {}", .{code});
+ }
+ },
+ else => {
+ std.log.err("clang terminated with stderr: {}", .{stderr});
+ return comp.failCObj(c_object, "clang terminated unexpectedly", .{});
+ },
+ }
}
- }
- // TODO handle .d files
+ // TODO handle .d files
- // TODO Add renameat capabilities to the std lib in a higher layer than the posix layer.
- const tmp_basename = std.fs.path.basename(out_obj_path);
- try std.os.renameat(zig_cache_tmp_dir.fd, tmp_basename, mod.zig_cache_artifact_directory.handle.fd, o_basename);
+ // Rename into place.
+ const digest = ch.final();
+ const full_object_path = if (comp.zig_cache_directory.path) |p|
+ try std.fs.path.join(arena, &[_][]const u8{ p, "o", &digest, o_basename })
+ else
+ try std.fs.path.join(arena, &[_][]const u8{ "o", &digest, o_basename });
+ try std.fs.rename(out_obj_path, full_object_path);
- const success_file_path = try std.fs.path.join(mod.gpa, &[_][]const u8{
- mod.zig_cache_artifact_directory.path.?, o_basename,
- });
- c_object.status = .{ .success = success_file_path };
+ ch.writeManifest() catch |err| {
+ std.log.warn("failed to write cache manifest when compiling '{}': {}", .{ c_object.src_path, @errorName(err) });
+ };
+ break :blk full_object_path;
+ } else blk: {
+ const digest = ch.final();
+ const full_object_path = if (comp.zig_cache_directory.path) |p|
+ try std.fs.path.join(arena, &[_][]const u8{ p, "o", &digest, o_basename })
+ else
+ try std.fs.path.join(arena, &[_][]const u8{ "o", &digest, o_basename });
+ break :blk full_object_path;
+ };
+
+ c_object.status = .{
+ .success = .{
+ .object_path = full_object_path,
+ .lock = ch.toOwnedLock(),
+ },
+ };
}
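Both branches above end by joining `o/<digest>/<basename>` under the cache directory: on a hit no compilation happens at all, and on a miss the temp file is renamed into that digest-addressed slot. A sketch of just the path construction, factored into a hypothetical helper (the commit inlines this join in both branches):

const std = @import("std");

// Hypothetical helper, illustrating the digest-addressed layout:
// artifacts live under <zig-cache>/o/<hex digest>/<basename>.
fn artifactPath(
    arena: *std.mem.Allocator,
    cache_path: ?[]const u8,
    digest: []const u8,
    basename: []const u8,
) ![]const u8 {
    if (cache_path) |p| {
        return std.fs.path.join(arena, &[_][]const u8{ p, "o", digest, basename });
    }
    return std.fs.path.join(arena, &[_][]const u8{ "o", digest, basename });
}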
fn tmpFilePath(mod: *Module, arena: *Allocator, suffix: []const u8) error{OutOfMemory}![]const u8 {
@@ -2016,2041 +1272,37 @@ fn addCCArgs(
try argv.appendSlice(mod.clang_argv);
}
-pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
- const tracy = trace(@src());
- defer tracy.end();
-
- const subsequent_analysis = switch (decl.analysis) {
- .in_progress => unreachable,
-
- .sema_failure,
- .sema_failure_retryable,
- .codegen_failure,
- .dependency_failure,
- .codegen_failure_retryable,
- => return error.AnalysisFail,
-
- .complete => return,
-
- .outdated => blk: {
- log.debug("re-analyzing {}\n", .{decl.name});
-
- // The exports this Decl performs will be re-discovered, so we remove them here
- // prior to re-analysis.
- self.deleteDeclExports(decl);
- // Dependencies will be re-discovered, so we remove them here prior to re-analysis.
- for (decl.dependencies.items()) |entry| {
- const dep = entry.key;
- dep.removeDependant(decl);
- if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
- // We don't perform a deletion here, because this Decl or another one
- // may end up referencing it before the update is complete.
- dep.deletion_flag = true;
- try self.deletion_set.append(self.gpa, dep);
- }
- }
- decl.dependencies.clearRetainingCapacity();
-
- break :blk true;
- },
-
- .unreferenced => false,
- };
-
- const type_changed = if (self.root_scope.cast(Scope.ZIRModule)) |zir_module|
- try zir_sema.analyzeZirDecl(self, decl, zir_module.contents.module.decls[decl.src_index])
- else
- self.astGenAndAnalyzeDecl(decl) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- error.AnalysisFail => return error.AnalysisFail,
- else => {
- try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
- self.failed_decls.putAssumeCapacityNoClobber(decl, try ErrorMsg.create(
- self.gpa,
- decl.src(),
- "unable to analyze: {}",
- .{@errorName(err)},
- ));
- decl.analysis = .sema_failure_retryable;
- return error.AnalysisFail;
- },
- };
-
- if (subsequent_analysis) {
- // We may need to chase the dependants and re-analyze them.
- // However, if the decl is a function, and the type is the same, we do not need to.
- if (type_changed or decl.typed_value.most_recent.typed_value.val.tag() != .function) {
- for (decl.dependants.items()) |entry| {
- const dep = entry.key;
- switch (dep.analysis) {
- .unreferenced => unreachable,
- .in_progress => unreachable,
- .outdated => continue, // already queued for update
-
- .dependency_failure,
- .sema_failure,
- .sema_failure_retryable,
- .codegen_failure,
- .codegen_failure_retryable,
- .complete,
- => if (dep.generation != self.generation) {
- try self.markOutdatedDecl(dep);
- },
- }
- }
- }
- }
+fn failCObj(mod: *Module, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError {
+ @setCold(true);
+ const err_msg = try ErrorMsg.create(mod.gpa, 0, "unable to build C object: " ++ format, args);
+ return mod.failCObjWithOwnedErrorMsg(c_object, err_msg);
}
-fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
- const tracy = trace(@src());
- defer tracy.end();
-
- const container_scope = decl.scope.cast(Scope.Container).?;
- const tree = try self.getAstTree(container_scope);
- const ast_node = tree.root_node.decls()[decl.src_index];
- switch (ast_node.tag) {
- .FnProto => {
- const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", ast_node);
-
- decl.analysis = .in_progress;
-
- // This arena allocator's memory is discarded at the end of this function. It is used
- // to determine the type of the function, and hence the type of the decl, which is needed
- // to complete the Decl analysis.
- var fn_type_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
- defer fn_type_scope_arena.deinit();
- var fn_type_scope: Scope.GenZIR = .{
- .decl = decl,
- .arena = &fn_type_scope_arena.allocator,
- .parent = decl.scope,
- };
- defer fn_type_scope.instructions.deinit(self.gpa);
-
- decl.is_pub = fn_proto.getVisibToken() != null;
- const body_node = fn_proto.getBodyNode() orelse
- return self.failTok(&fn_type_scope.base, fn_proto.fn_token, "TODO implement extern functions", .{});
-
- const param_decls = fn_proto.params();
- const param_types = try fn_type_scope.arena.alloc(*zir.Inst, param_decls.len);
-
- const fn_src = tree.token_locs[fn_proto.fn_token].start;
- const type_type = try astgen.addZIRInstConst(self, &fn_type_scope.base, fn_src, .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.type_type),
- });
- const type_type_rl: astgen.ResultLoc = .{ .ty = type_type };
- for (param_decls) |param_decl, i| {
- const param_type_node = switch (param_decl.param_type) {
- .any_type => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement anytype parameter", .{}),
- .type_expr => |node| node,
- };
- param_types[i] = try astgen.expr(self, &fn_type_scope.base, type_type_rl, param_type_node);
- }
- if (fn_proto.getVarArgsToken()) |var_args_token| {
- return self.failTok(&fn_type_scope.base, var_args_token, "TODO implement var args", .{});
- }
- if (fn_proto.getLibName()) |lib_name| {
- return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{});
- }
- if (fn_proto.getAlignExpr()) |align_expr| {
- return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{});
- }
- if (fn_proto.getSectionExpr()) |sect_expr| {
- return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{});
- }
- if (fn_proto.getCallconvExpr()) |callconv_expr| {
- return self.failNode(
- &fn_type_scope.base,
- callconv_expr,
- "TODO implement function calling convention expression",
- .{},
- );
- }
- const return_type_expr = switch (fn_proto.return_type) {
- .Explicit => |node| node,
- .InferErrorSet => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement inferred error sets", .{}),
- .Invalid => |tok| return self.failTok(&fn_type_scope.base, tok, "unable to parse return type", .{}),
- };
-
- const return_type_inst = try astgen.expr(self, &fn_type_scope.base, type_type_rl, return_type_expr);
- const fn_type_inst = try astgen.addZIRInst(self, &fn_type_scope.base, fn_src, zir.Inst.FnType, .{
- .return_type = return_type_inst,
- .param_types = param_types,
- }, .{});
-
- // We need the memory for the Type to go into the arena for the Decl
- var decl_arena = std.heap.ArenaAllocator.init(self.gpa);
- errdefer decl_arena.deinit();
- const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
-
- var block_scope: Scope.Block = .{
- .parent = null,
- .func = null,
- .decl = decl,
- .instructions = .{},
- .arena = &decl_arena.allocator,
- .is_comptime = false,
- };
- defer block_scope.instructions.deinit(self.gpa);
-
- const fn_type = try zir_sema.analyzeBodyValueAsType(self, &block_scope, fn_type_inst, .{
- .instructions = fn_type_scope.instructions.items,
- });
- const new_func = try decl_arena.allocator.create(Fn);
- const fn_payload = try decl_arena.allocator.create(Value.Payload.Function);
-
- const fn_zir = blk: {
- // This scope's arena memory is discarded after the ZIR generation
- // pass completes, and semantic analysis of it completes.
- var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
- errdefer gen_scope_arena.deinit();
- var gen_scope: Scope.GenZIR = .{
- .decl = decl,
- .arena = &gen_scope_arena.allocator,
- .parent = decl.scope,
- };
- defer gen_scope.instructions.deinit(self.gpa);
-
- // We need an instruction for each parameter, and they must be first in the body.
- try gen_scope.instructions.resize(self.gpa, fn_proto.params_len);
- var params_scope = &gen_scope.base;
- for (fn_proto.params()) |param, i| {
- const name_token = param.name_token.?;
- const src = tree.token_locs[name_token].start;
- const param_name = tree.tokenSlice(name_token); // TODO: call identifierTokenString
- const arg = try gen_scope_arena.allocator.create(zir.Inst.Arg);
- arg.* = .{
- .base = .{
- .tag = .arg,
- .src = src,
- },
- .positionals = .{
- .name = param_name,
- },
- .kw_args = .{},
- };
- gen_scope.instructions.items[i] = &arg.base;
- const sub_scope = try gen_scope_arena.allocator.create(Scope.LocalVal);
- sub_scope.* = .{
- .parent = params_scope,
- .gen_zir = &gen_scope,
- .name = param_name,
- .inst = &arg.base,
- };
- params_scope = &sub_scope.base;
- }
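Each loop iteration above pushes a `LocalVal` whose `parent` is the previous `params_scope`, so parameters form a singly linked chain that identifier resolution walks from innermost to outermost. The lookup side of that structure, reduced to a sketch:

const std = @import("std");

const Local = struct {
    parent: ?*const Local,
    name: []const u8,
};

// Walk the chain innermost-out, returning the first matching binding.
fn lookup(innermost: ?*const Local, name: []const u8) ?*const Local {
    var it = innermost;
    while (it) |local| : (it = local.parent) {
        if (std.mem.eql(u8, local.name, name)) return local;
    }
    return null;
}

test "parameters form an innermost-out scope chain" {
    const a = Local{ .parent = null, .name = "a" };
    const b = Local{ .parent = &a, .name = "b" };
    std.debug.assert(lookup(&b, "a") != null);
    std.debug.assert(lookup(&b, "c") == null);
}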
-
- const body_block = body_node.cast(ast.Node.Block).?;
-
- try astgen.blockExpr(self, params_scope, body_block);
-
- if (gen_scope.instructions.items.len == 0 or
- !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn())
- {
- const src = tree.token_locs[body_block.rbrace].start;
- _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid);
- }
-
- const fn_zir = try gen_scope_arena.allocator.create(Fn.ZIR);
- fn_zir.* = .{
- .body = .{
- .instructions = try gen_scope.arena.dupe(*zir.Inst, gen_scope.instructions.items),
- },
- .arena = gen_scope_arena.state,
- };
- break :blk fn_zir;
- };
-
- new_func.* = .{
- .analysis = .{ .queued = fn_zir },
- .owner_decl = decl,
- };
- fn_payload.* = .{ .func = new_func };
-
- var prev_type_has_bits = false;
- var type_changed = true;
-
- if (decl.typedValueManaged()) |tvm| {
- prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
- type_changed = !tvm.typed_value.ty.eql(fn_type);
-
- tvm.deinit(self.gpa);
- }
-
- decl_arena_state.* = decl_arena.state;
- decl.typed_value = .{
- .most_recent = .{
- .typed_value = .{
- .ty = fn_type,
- .val = Value.initPayload(&fn_payload.base),
- },
- .arena = decl_arena_state,
- },
- };
- decl.analysis = .complete;
- decl.generation = self.generation;
-
- if (fn_type.hasCodeGenBits()) {
- // We don't fully codegen the decl until later, but we do need to reserve a global
- // offset table index for it. This allows us to codegen decls out of dependency order,
- // increasing how many computations can be done in parallel.
- try self.bin_file.allocateDeclIndexes(decl);
- try self.work_queue.writeItem(.{ .codegen_decl = decl });
- } else if (prev_type_has_bits) {
- self.bin_file.freeDecl(decl);
- }
-
- if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
- if (tree.token_ids[maybe_export_token] == .Keyword_export) {
- const export_src = tree.token_locs[maybe_export_token].start;
- const name_loc = tree.token_locs[fn_proto.getNameToken().?];
- const name = tree.tokenSliceLoc(name_loc);
- // The scope needs to have the decl in it.
- try self.analyzeExport(&block_scope.base, export_src, name, decl);
- }
- }
- return type_changed;
- },
- .VarDecl => {
- const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", ast_node);
-
- decl.analysis = .in_progress;
-
- // We need the memory for the Type to go into the arena for the Decl
- var decl_arena = std.heap.ArenaAllocator.init(self.gpa);
- errdefer decl_arena.deinit();
- const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
-
- var block_scope: Scope.Block = .{
- .parent = null,
- .func = null,
- .decl = decl,
- .instructions = .{},
- .arena = &decl_arena.allocator,
- .is_comptime = true,
- };
- defer block_scope.instructions.deinit(self.gpa);
-
- decl.is_pub = var_decl.getVisibToken() != null;
- const is_extern = blk: {
- const maybe_extern_token = var_decl.getExternExportToken() orelse
- break :blk false;
- if (tree.token_ids[maybe_extern_token] != .Keyword_extern) break :blk false;
- if (var_decl.getInitNode()) |some| {
- return self.failNode(&block_scope.base, some, "extern variables have no initializers", .{});
- }
- break :blk true;
- };
- if (var_decl.getLibName()) |lib_name| {
- assert(is_extern);
- return self.failNode(&block_scope.base, lib_name, "TODO implement function library name", .{});
- }
- const is_mutable = tree.token_ids[var_decl.mut_token] == .Keyword_var;
- const is_threadlocal = if (var_decl.getThreadLocalToken()) |some| blk: {
- if (!is_mutable) {
- return self.failTok(&block_scope.base, some, "threadlocal variable cannot be constant", .{});
- }
- break :blk true;
- } else false;
- assert(var_decl.getComptimeToken() == null);
- if (var_decl.getAlignNode()) |align_expr| {
- return self.failNode(&block_scope.base, align_expr, "TODO implement function align expression", .{});
- }
- if (var_decl.getSectionNode()) |sect_expr| {
- return self.failNode(&block_scope.base, sect_expr, "TODO implement function section expression", .{});
- }
-
- const var_info: struct { ty: Type, val: ?Value } = if (var_decl.getInitNode()) |init_node| vi: {
- var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
- defer gen_scope_arena.deinit();
- var gen_scope: Scope.GenZIR = .{
- .decl = decl,
- .arena = &gen_scope_arena.allocator,
- .parent = decl.scope,
- };
- defer gen_scope.instructions.deinit(self.gpa);
-
- const init_result_loc: astgen.ResultLoc = if (var_decl.getTypeNode()) |type_node| rl: {
- const src = tree.token_locs[type_node.firstToken()].start;
- const type_type = try astgen.addZIRInstConst(self, &gen_scope.base, src, .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.type_type),
- });
- const var_type = try astgen.expr(self, &gen_scope.base, .{ .ty = type_type }, type_node);
- break :rl .{ .ty = var_type };
- } else .none;
-
- const src = tree.token_locs[init_node.firstToken()].start;
- const init_inst = try astgen.expr(self, &gen_scope.base, init_result_loc, init_node);
-
- var inner_block: Scope.Block = .{
- .parent = null,
- .func = null,
- .decl = decl,
- .instructions = .{},
- .arena = &gen_scope_arena.allocator,
- .is_comptime = true,
- };
- defer inner_block.instructions.deinit(self.gpa);
- try zir_sema.analyzeBody(self, &inner_block.base, .{ .instructions = gen_scope.instructions.items });
-
- // The result location guarantees the type coercion.
- const analyzed_init_inst = init_inst.analyzed_inst.?;
- // The is_comptime in the Scope.Block guarantees the result is comptime-known.
- const val = analyzed_init_inst.value().?;
-
- const ty = try analyzed_init_inst.ty.copy(block_scope.arena);
- break :vi .{
- .ty = ty,
- .val = try val.copy(block_scope.arena),
- };
- } else if (!is_extern) {
- return self.failTok(&block_scope.base, var_decl.firstToken(), "variables must be initialized", .{});
- } else if (var_decl.getTypeNode()) |type_node| vi: {
- // Temporary arena for the zir instructions.
- var type_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
- defer type_scope_arena.deinit();
- var type_scope: Scope.GenZIR = .{
- .decl = decl,
- .arena = &type_scope_arena.allocator,
- .parent = decl.scope,
- };
- defer type_scope.instructions.deinit(self.gpa);
-
- const src = tree.token_locs[type_node.firstToken()].start;
- const type_type = try astgen.addZIRInstConst(self, &type_scope.base, src, .{
- .ty = Type.initTag(.type),
- .val = Value.initTag(.type_type),
- });
- const var_type = try astgen.expr(self, &type_scope.base, .{ .ty = type_type }, type_node);
- const ty = try zir_sema.analyzeBodyValueAsType(self, &block_scope, var_type, .{
- .instructions = type_scope.instructions.items,
- });
- break :vi .{
- .ty = ty,
- .val = null,
- };
- } else {
- return self.failTok(&block_scope.base, var_decl.firstToken(), "unable to infer variable type", .{});
- };
-
- if (is_mutable and !var_info.ty.isValidVarType(is_extern)) {
- return self.failTok(&block_scope.base, var_decl.firstToken(), "variable of type '{}' must be const", .{var_info.ty});
- }
-
- var type_changed = true;
- if (decl.typedValueManaged()) |tvm| {
- type_changed = !tvm.typed_value.ty.eql(var_info.ty);
-
- tvm.deinit(self.gpa);
- }
-
- const new_variable = try decl_arena.allocator.create(Var);
- const var_payload = try decl_arena.allocator.create(Value.Payload.Variable);
- new_variable.* = .{
- .owner_decl = decl,
- .init = var_info.val orelse undefined,
- .is_extern = is_extern,
- .is_mutable = is_mutable,
- .is_threadlocal = is_threadlocal,
- };
- var_payload.* = .{ .variable = new_variable };
-
- decl_arena_state.* = decl_arena.state;
- decl.typed_value = .{
- .most_recent = .{
- .typed_value = .{
- .ty = var_info.ty,
- .val = Value.initPayload(&var_payload.base),
- },
- .arena = decl_arena_state,
- },
- };
- decl.analysis = .complete;
- decl.generation = self.generation;
-
- if (var_decl.getExternExportToken()) |maybe_export_token| {
- if (tree.token_ids[maybe_export_token] == .Keyword_export) {
- const export_src = tree.token_locs[maybe_export_token].start;
- const name_loc = tree.token_locs[var_decl.name_token];
- const name = tree.tokenSliceLoc(name_loc);
- // The scope needs to have the decl in it.
- try self.analyzeExport(&block_scope.base, export_src, name, decl);
- }
- }
- return type_changed;
- },
- .Comptime => {
- const comptime_decl = @fieldParentPtr(ast.Node.Comptime, "base", ast_node);
-
- decl.analysis = .in_progress;
-
-            // A comptime decl does not store any value, so we can deinit this arena once analysis is done.
- var analysis_arena = std.heap.ArenaAllocator.init(self.gpa);
- defer analysis_arena.deinit();
- var gen_scope: Scope.GenZIR = .{
- .decl = decl,
- .arena = &analysis_arena.allocator,
- .parent = decl.scope,
- };
- defer gen_scope.instructions.deinit(self.gpa);
-
- _ = try astgen.comptimeExpr(self, &gen_scope.base, .none, comptime_decl.expr);
-
- var block_scope: Scope.Block = .{
- .parent = null,
- .func = null,
- .decl = decl,
- .instructions = .{},
- .arena = &analysis_arena.allocator,
- .is_comptime = true,
- };
- defer block_scope.instructions.deinit(self.gpa);
-
- _ = try zir_sema.analyzeBody(self, &block_scope.base, .{
- .instructions = gen_scope.instructions.items,
- });
-
- decl.analysis = .complete;
- decl.generation = self.generation;
- return true;
- },
- .Use => @panic("TODO usingnamespace decl"),
- else => unreachable,
+fn failCObjWithOwnedErrorMsg(mod: *Module, c_object: *CObject, err_msg: *ErrorMsg) InnerError {
+ {
+ errdefer err_msg.destroy(mod.gpa);
+ try mod.failed_c_objects.ensureCapacity(mod.gpa, mod.failed_c_objects.items().len + 1);
}
+ mod.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
+ c_object.status = .failure;
+ return error.AnalysisFail;
}
-fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
- try depender.dependencies.ensureCapacity(self.gpa, depender.dependencies.items().len + 1);
- try dependee.dependants.ensureCapacity(self.gpa, dependee.dependants.items().len + 1);
-
- depender.dependencies.putAssumeCapacity(dependee, {});
- dependee.dependants.putAssumeCapacity(depender, {});
-}
-
-fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
- switch (root_scope.status) {
- .never_loaded, .unloaded_success => {
- try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
-
- const source = try root_scope.getSource(self);
-
- var keep_zir_module = false;
- const zir_module = try self.gpa.create(zir.Module);
- defer if (!keep_zir_module) self.gpa.destroy(zir_module);
-
- zir_module.* = try zir.parse(self.gpa, source);
- defer if (!keep_zir_module) zir_module.deinit(self.gpa);
-
- if (zir_module.error_msg) |src_err_msg| {
- self.failed_files.putAssumeCapacityNoClobber(
- &root_scope.base,
- try ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
- );
- root_scope.status = .unloaded_parse_failure;
- return error.AnalysisFail;
- }
-
- root_scope.status = .loaded_success;
- root_scope.contents = .{ .module = zir_module };
- keep_zir_module = true;
-
- return zir_module;
- },
-
- .unloaded_parse_failure,
- .unloaded_sema_failure,
- => return error.AnalysisFail,
+pub const ErrorMsg = struct {
+ byte_offset: usize,
+ msg: []const u8,
- .loaded_success, .loaded_sema_failure => return root_scope.contents.module,
+ pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg {
+ const self = try gpa.create(ErrorMsg);
+ errdefer gpa.destroy(self);
+ self.* = try init(gpa, byte_offset, format, args);
+ return self;
}
-}
-
-fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
- const tracy = trace(@src());
- defer tracy.end();
-
- const root_scope = container_scope.file_scope;
-
- switch (root_scope.status) {
- .never_loaded, .unloaded_success => {
- try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
-
- const source = try root_scope.getSource(self);
-
- var keep_tree = false;
- const tree = try std.zig.parse(self.gpa, source);
- defer if (!keep_tree) tree.deinit();
-
- if (tree.errors.len != 0) {
- const parse_err = tree.errors[0];
-
- var msg = std.ArrayList(u8).init(self.gpa);
- defer msg.deinit();
-
- try parse_err.render(tree.token_ids, msg.outStream());
- const err_msg = try self.gpa.create(ErrorMsg);
- err_msg.* = .{
- .msg = msg.toOwnedSlice(),
- .byte_offset = tree.token_locs[parse_err.loc()].start,
- };
-
- self.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg);
- root_scope.status = .unloaded_parse_failure;
- return error.AnalysisFail;
- }
-
- root_scope.status = .loaded_success;
- root_scope.contents = .{ .tree = tree };
- keep_tree = true;
- return tree;
- },
-
- .unloaded_parse_failure => return error.AnalysisFail,
-
- .loaded_success => return root_scope.contents.tree,
- }
-}
-
-fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- // We may be analyzing it for the first time, or this may be
- // an incremental update. This code handles both cases.
- const tree = try self.getAstTree(container_scope);
- const decls = tree.root_node.decls();
-
- try self.work_queue.ensureUnusedCapacity(decls.len);
- try container_scope.decls.ensureCapacity(self.gpa, decls.len);
-
- // Keep track of the decls that we expect to see in this file so that
- // we know which ones have been deleted.
- var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
- defer deleted_decls.deinit();
- try deleted_decls.ensureCapacity(container_scope.decls.items().len);
- for (container_scope.decls.items()) |entry| {
- deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
- }
-
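-    // Illustrative example: if the file previously declared `a`, `b`, and `c` but
-    // the updated source only contains `a` and `c`, the loop below removes `a` and
-    // `c` from `deleted_decls` as it encounters them, leaving `b` to be deleted at
-    // the end of this function.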
- for (decls) |src_decl, decl_i| {
- if (src_decl.cast(ast.Node.FnProto)) |fn_proto| {
- // We will create a Decl for it regardless of analysis status.
- const name_tok = fn_proto.getNameToken() orelse {
- @panic("TODO missing function name");
- };
-
- const name_loc = tree.token_locs[name_tok];
- const name = tree.tokenSliceLoc(name_loc);
- const name_hash = container_scope.fullyQualifiedNameHash(name);
- const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
- if (self.decl_table.get(name_hash)) |decl| {
-                // Update the AST Node index of the decl even if its contents are
-                // unchanged; the decl may have been re-ordered within the file.
- decl.src_index = decl_i;
- if (deleted_decls.remove(decl) == null) {
- decl.analysis = .sema_failure;
- const err_msg = try ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
- errdefer err_msg.destroy(self.gpa);
- try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
- } else {
- if (!srcHashEql(decl.contents_hash, contents_hash)) {
- try self.markOutdatedDecl(decl);
- decl.contents_hash = contents_hash;
- } else switch (self.bin_file.tag) {
- .coff => {
- // TODO Implement for COFF
- },
- .elf => if (decl.fn_link.elf.len != 0) {
- // TODO Look into detecting when this would be unnecessary by storing enough state
- // in `Decl` to notice that the line number did not change.
- self.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
- },
- .macho => {
- // TODO Implement for MachO
- },
- .c, .wasm => {},
- }
- }
- } else {
- const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
- container_scope.decls.putAssumeCapacity(new_decl, {});
- if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
- if (tree.token_ids[maybe_export_token] == .Keyword_export) {
- self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
- }
- }
- }
- } else if (src_decl.castTag(.VarDecl)) |var_decl| {
- const name_loc = tree.token_locs[var_decl.name_token];
- const name = tree.tokenSliceLoc(name_loc);
- const name_hash = container_scope.fullyQualifiedNameHash(name);
- const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
- if (self.decl_table.get(name_hash)) |decl| {
-                // Update the AST Node index of the decl even if its contents are
-                // unchanged; the decl may have been re-ordered within the file.
- decl.src_index = decl_i;
- if (deleted_decls.remove(decl) == null) {
- decl.analysis = .sema_failure;
- const err_msg = try ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name});
- errdefer err_msg.destroy(self.gpa);
- try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
- } else if (!srcHashEql(decl.contents_hash, contents_hash)) {
- try self.markOutdatedDecl(decl);
- decl.contents_hash = contents_hash;
- }
- } else {
- const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
- container_scope.decls.putAssumeCapacity(new_decl, {});
- if (var_decl.getExternExportToken()) |maybe_export_token| {
- if (tree.token_ids[maybe_export_token] == .Keyword_export) {
- self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
- }
- }
- }
- } else if (src_decl.castTag(.Comptime)) |comptime_node| {
- const name_index = self.getNextAnonNameIndex();
- const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index});
- defer self.gpa.free(name);
-
- const name_hash = container_scope.fullyQualifiedNameHash(name);
- const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
-
- const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
- container_scope.decls.putAssumeCapacity(new_decl, {});
- self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
- } else if (src_decl.castTag(.ContainerField)) |container_field| {
- log.err("TODO: analyze container field", .{});
- } else if (src_decl.castTag(.TestDecl)) |test_decl| {
- log.err("TODO: analyze test decl", .{});
- } else if (src_decl.castTag(.Use)) |use_decl| {
- log.err("TODO: analyze usingnamespace decl", .{});
- } else {
- unreachable;
- }
- }
-    // Handle decls explicitly deleted from the source code. Not to be confused
-    // with decls we delete because they are no longer referenced.
- for (deleted_decls.items()) |entry| {
- log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
- try self.deleteDecl(entry.key);
- }
-}
-
-fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
- // We may be analyzing it for the first time, or this may be
- // an incremental update. This code handles both cases.
- const src_module = try self.getSrcModule(root_scope);
-
- try self.work_queue.ensureUnusedCapacity(src_module.decls.len);
- try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len);
-
- var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa);
- defer exports_to_resolve.deinit();
-
- // Keep track of the decls that we expect to see in this file so that
- // we know which ones have been deleted.
- var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
- defer deleted_decls.deinit();
- try deleted_decls.ensureCapacity(self.decl_table.items().len);
- for (self.decl_table.items()) |entry| {
- deleted_decls.putAssumeCapacityNoClobber(entry.value, {});
- }
-
- for (src_module.decls) |src_decl, decl_i| {
- const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name);
- if (self.decl_table.get(name_hash)) |decl| {
- deleted_decls.removeAssertDiscard(decl);
- if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) {
- try self.markOutdatedDecl(decl);
- decl.contents_hash = src_decl.contents_hash;
- }
- } else {
- const new_decl = try self.createNewDecl(
- &root_scope.base,
- src_decl.name,
- decl_i,
- name_hash,
- src_decl.contents_hash,
- );
- root_scope.decls.appendAssumeCapacity(new_decl);
- if (src_decl.inst.cast(zir.Inst.Export)) |export_inst| {
- try exports_to_resolve.append(src_decl);
- }
- }
- }
- for (exports_to_resolve.items) |export_decl| {
- _ = try zir_sema.resolveZirDecl(self, &root_scope.base, export_decl);
- }
-    // Handle decls explicitly deleted from the source code. Not to be confused
-    // with decls we delete because they are no longer referenced.
- for (deleted_decls.items()) |entry| {
- log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
- try self.deleteDecl(entry.key);
- }
-}
-
-fn deleteDecl(self: *Module, decl: *Decl) !void {
- try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len);
-
- // Remove from the namespace it resides in. In the case of an anonymous Decl it will
- // not be present in the set, and this does nothing.
- decl.scope.removeDecl(decl);
-
- log.debug("deleting decl '{}'\n", .{decl.name});
- const name_hash = decl.fullyQualifiedNameHash();
- self.decl_table.removeAssertDiscard(name_hash);
- // Remove itself from its dependencies, because we are about to destroy the decl pointer.
- for (decl.dependencies.items()) |entry| {
- const dep = entry.key;
- dep.removeDependant(decl);
- if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
- // We don't recursively perform a deletion here, because during the update,
- // another reference to it may turn up.
- dep.deletion_flag = true;
- self.deletion_set.appendAssumeCapacity(dep);
- }
- }
- // Anything that depends on this deleted decl certainly needs to be re-analyzed.
- for (decl.dependants.items()) |entry| {
- const dep = entry.key;
- dep.removeDependency(decl);
- if (dep.analysis != .outdated) {
- // TODO Move this failure possibility to the top of the function.
- try self.markOutdatedDecl(dep);
- }
- }
- if (self.failed_decls.remove(decl)) |entry| {
- entry.value.destroy(self.gpa);
- }
- self.deleteDeclExports(decl);
- self.bin_file.freeDecl(decl);
- decl.destroy(self.gpa);
-}
-
-/// Delete all the Export objects that are caused by this Decl. Re-analysis of
-/// this Decl will cause them to be re-created (or not).
-fn deleteDeclExports(self: *Module, decl: *Decl) void {
- const kv = self.export_owners.remove(decl) orelse return;
-
- for (kv.value) |exp| {
- if (self.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| {
- // Remove exports with owner_decl matching the regenerating decl.
- const list = decl_exports_kv.value;
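-            // Compact the list in place: matching entries are overwritten by
-            // shifting the tail left, then the allocation is shrunk to the new length.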
- var i: usize = 0;
- var new_len = list.len;
- while (i < new_len) {
- if (list[i].owner_decl == decl) {
- mem.copyBackwards(*Export, list[i..], list[i + 1 .. new_len]);
- new_len -= 1;
- } else {
- i += 1;
- }
- }
- decl_exports_kv.value = self.gpa.shrink(list, new_len);
- if (new_len == 0) {
- self.decl_exports.removeAssertDiscard(exp.exported_decl);
- }
- }
- if (self.bin_file.cast(link.File.Elf)) |elf| {
- elf.deleteExport(exp.link);
- }
- if (self.failed_exports.remove(exp)) |entry| {
- entry.value.destroy(self.gpa);
- }
- _ = self.symbol_exports.remove(exp.options.name);
- self.gpa.free(exp.options.name);
- self.gpa.destroy(exp);
- }
- self.gpa.free(kv.value);
-}
-
-fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
- const tracy = trace(@src());
- defer tracy.end();
-
- // Use the Decl's arena for function memory.
- var arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
- defer decl.typed_value.most_recent.arena.?.* = arena.state;
- var inner_block: Scope.Block = .{
- .parent = null,
- .func = func,
- .decl = decl,
- .instructions = .{},
- .arena = &arena.allocator,
- .is_comptime = false,
- };
- defer inner_block.instructions.deinit(self.gpa);
-
- const fn_zir = func.analysis.queued;
- defer fn_zir.arena.promote(self.gpa).deinit();
- func.analysis = .{ .in_progress = {} };
- log.debug("set {} to in_progress\n", .{decl.name});
-
- try zir_sema.analyzeBody(self, &inner_block.base, fn_zir.body);
-
- const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items);
- func.analysis = .{ .success = .{ .instructions = instructions } };
- log.debug("set {} to success\n", .{decl.name});
-}
-
-fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
- log.debug("mark {} outdated\n", .{decl.name});
- try self.work_queue.writeItem(.{ .analyze_decl = decl });
- if (self.failed_decls.remove(decl)) |entry| {
- entry.value.destroy(self.gpa);
- }
- decl.analysis = .outdated;
-}
-
-fn allocateNewDecl(
- self: *Module,
- scope: *Scope,
- src_index: usize,
- contents_hash: std.zig.SrcHash,
-) !*Decl {
- const new_decl = try self.gpa.create(Decl);
- new_decl.* = .{
- .name = "",
- .scope = scope.namespace(),
- .src_index = src_index,
- .typed_value = .{ .never_succeeded = {} },
- .analysis = .unreferenced,
- .deletion_flag = false,
- .contents_hash = contents_hash,
- .link = switch (self.bin_file.tag) {
- .coff => .{ .coff = link.File.Coff.TextBlock.empty },
- .elf => .{ .elf = link.File.Elf.TextBlock.empty },
- .macho => .{ .macho = link.File.MachO.TextBlock.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = {} },
- },
- .fn_link = switch (self.bin_file.tag) {
- .coff => .{ .coff = {} },
- .elf => .{ .elf = link.File.Elf.SrcFn.empty },
- .macho => .{ .macho = link.File.MachO.SrcFn.empty },
- .c => .{ .c = {} },
- .wasm => .{ .wasm = null },
- },
- .generation = 0,
- .is_pub = false,
- };
- return new_decl;
-}
-
-fn createNewDecl(
- self: *Module,
- scope: *Scope,
- decl_name: []const u8,
- src_index: usize,
- name_hash: Scope.NameHash,
- contents_hash: std.zig.SrcHash,
-) !*Decl {
- try self.decl_table.ensureCapacity(self.gpa, self.decl_table.items().len + 1);
- const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash);
- errdefer self.gpa.destroy(new_decl);
- new_decl.name = try mem.dupeZ(self.gpa, u8, decl_name);
- self.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl);
- return new_decl;
-}
-
-/// Get error value for error tag `name`.
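-/// Values are interned: the first lookup of a name assigns the next sequential
-/// `u16` (its index in the global error set); later lookups return the same entry.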
-pub fn getErrorValue(self: *Module, name: []const u8) !std.StringHashMapUnmanaged(u16).Entry {
- const gop = try self.global_error_set.getOrPut(self.gpa, name);
- if (gop.found_existing)
- return gop.entry.*;
- errdefer self.global_error_set.removeAssertDiscard(name);
-
- gop.entry.key = try self.gpa.dupe(u8, name);
- gop.entry.value = @intCast(u16, self.global_error_set.count() - 1);
- return gop.entry.*;
-}
-
-pub fn requireFunctionBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
- return scope.cast(Scope.Block) orelse
- return self.fail(scope, src, "instruction illegal outside function body", .{});
-}
-
-pub fn requireRuntimeBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
- const block = try self.requireFunctionBlock(scope, src);
- if (block.is_comptime) {
- return self.fail(scope, src, "unable to resolve comptime value", .{});
- }
- return block;
-}
-
-pub fn resolveConstValue(self: *Module, scope: *Scope, base: *Inst) !Value {
- return (try self.resolveDefinedValue(scope, base)) orelse
- return self.fail(scope, base.src, "unable to resolve comptime value", .{});
-}
-
-pub fn resolveDefinedValue(self: *Module, scope: *Scope, base: *Inst) !?Value {
- if (base.value()) |val| {
- if (val.isUndef()) {
- return self.fail(scope, base.src, "use of undefined value here causes undefined behavior", .{});
- }
- return val;
- }
- return null;
-}
-
-pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_name: []const u8, exported_decl: *Decl) !void {
- try self.ensureDeclAnalyzed(exported_decl);
- const typed_value = exported_decl.typed_value.most_recent.typed_value;
- switch (typed_value.ty.zigTypeTag()) {
- .Fn => {},
- else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}),
- }
-
- try self.decl_exports.ensureCapacity(self.gpa, self.decl_exports.items().len + 1);
- try self.export_owners.ensureCapacity(self.gpa, self.export_owners.items().len + 1);
-
- const new_export = try self.gpa.create(Export);
- errdefer self.gpa.destroy(new_export);
-
- const symbol_name = try self.gpa.dupe(u8, borrowed_symbol_name);
- errdefer self.gpa.free(symbol_name);
-
- const owner_decl = scope.decl().?;
-
- new_export.* = .{
- .options = .{ .name = symbol_name },
- .src = src,
- .link = .{},
- .owner_decl = owner_decl,
- .exported_decl = exported_decl,
- .status = .in_progress,
- };
-
- // Add to export_owners table.
- const eo_gop = self.export_owners.getOrPutAssumeCapacity(owner_decl);
- if (!eo_gop.found_existing) {
- eo_gop.entry.value = &[0]*Export{};
- }
- eo_gop.entry.value = try self.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
- eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export;
- errdefer eo_gop.entry.value = self.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);
-
- // Add to exported_decl table.
- const de_gop = self.decl_exports.getOrPutAssumeCapacity(exported_decl);
- if (!de_gop.found_existing) {
- de_gop.entry.value = &[0]*Export{};
- }
- de_gop.entry.value = try self.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
- de_gop.entry.value[de_gop.entry.value.len - 1] = new_export;
- errdefer de_gop.entry.value = self.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);
-
- if (self.symbol_exports.get(symbol_name)) |_| {
- try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
- self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
- self.gpa,
- src,
- "exported symbol collision: {}",
- .{symbol_name},
- ));
- // TODO: add a note
- new_export.status = .failed;
- return;
- }
-
- try self.symbol_exports.putNoClobber(self.gpa, symbol_name, new_export);
- self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => {
- try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
- self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
- self.gpa,
- src,
- "unable to export: {}",
- .{@errorName(err)},
- ));
- new_export.status = .failed_retryable;
- },
- };
-}
-
-pub fn addNoOp(
- self: *Module,
- block: *Scope.Block,
- src: usize,
- ty: Type,
- comptime tag: Inst.Tag,
-) !*Inst {
- const inst = try block.arena.create(tag.Type());
- inst.* = .{
- .base = .{
- .tag = tag,
- .ty = ty,
- .src = src,
- },
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn addUnOp(
- self: *Module,
- block: *Scope.Block,
- src: usize,
- ty: Type,
- tag: Inst.Tag,
- operand: *Inst,
-) !*Inst {
- const inst = try block.arena.create(Inst.UnOp);
- inst.* = .{
- .base = .{
- .tag = tag,
- .ty = ty,
- .src = src,
- },
- .operand = operand,
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn addBinOp(
- self: *Module,
- block: *Scope.Block,
- src: usize,
- ty: Type,
- tag: Inst.Tag,
- lhs: *Inst,
- rhs: *Inst,
-) !*Inst {
- const inst = try block.arena.create(Inst.BinOp);
- inst.* = .{
- .base = .{
- .tag = tag,
- .ty = ty,
- .src = src,
- },
- .lhs = lhs,
- .rhs = rhs,
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn addArg(self: *Module, block: *Scope.Block, src: usize, ty: Type, name: [*:0]const u8) !*Inst {
- const inst = try block.arena.create(Inst.Arg);
- inst.* = .{
- .base = .{
- .tag = .arg,
- .ty = ty,
- .src = src,
- },
- .name = name,
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn addBr(
- self: *Module,
- scope_block: *Scope.Block,
- src: usize,
- target_block: *Inst.Block,
- operand: *Inst,
-) !*Inst {
- const inst = try scope_block.arena.create(Inst.Br);
- inst.* = .{
- .base = .{
- .tag = .br,
- .ty = Type.initTag(.noreturn),
- .src = src,
- },
- .operand = operand,
- .block = target_block,
- };
- try scope_block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn addCondBr(
- self: *Module,
- block: *Scope.Block,
- src: usize,
- condition: *Inst,
- then_body: ir.Body,
- else_body: ir.Body,
-) !*Inst {
- const inst = try block.arena.create(Inst.CondBr);
- inst.* = .{
- .base = .{
- .tag = .condbr,
- .ty = Type.initTag(.noreturn),
- .src = src,
- },
- .condition = condition,
- .then_body = then_body,
- .else_body = else_body,
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn addCall(
- self: *Module,
- block: *Scope.Block,
- src: usize,
- ty: Type,
- func: *Inst,
- args: []const *Inst,
-) !*Inst {
- const inst = try block.arena.create(Inst.Call);
- inst.* = .{
- .base = .{
- .tag = .call,
- .ty = ty,
- .src = src,
- },
- .func = func,
- .args = args,
- };
- try block.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*Inst {
- const const_inst = try scope.arena().create(Inst.Constant);
- const_inst.* = .{
- .base = .{
- .tag = Inst.Constant.base_tag,
- .ty = typed_value.ty,
- .src = src,
- },
- .val = typed_value.val,
- };
- return &const_inst.base;
-}
-
-pub fn constType(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
- return self.constInst(scope, src, .{
- .ty = Type.initTag(.type),
- .val = try ty.toValue(scope.arena()),
- });
-}
-
-pub fn constVoid(self: *Module, scope: *Scope, src: usize) !*Inst {
- return self.constInst(scope, src, .{
- .ty = Type.initTag(.void),
- .val = Value.initTag(.void_value),
- });
-}
-
-pub fn constNoReturn(self: *Module, scope: *Scope, src: usize) !*Inst {
- return self.constInst(scope, src, .{
- .ty = Type.initTag(.noreturn),
- .val = Value.initTag(.unreachable_value),
- });
-}
-
-pub fn constUndef(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
- return self.constInst(scope, src, .{
- .ty = ty,
- .val = Value.initTag(.undef),
- });
-}
-
-pub fn constBool(self: *Module, scope: *Scope, src: usize, v: bool) !*Inst {
- return self.constInst(scope, src, .{
- .ty = Type.initTag(.bool),
- .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)],
- });
-}
-
-pub fn constIntUnsigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: u64) !*Inst {
- const int_payload = try scope.arena().create(Value.Payload.Int_u64);
- int_payload.* = .{ .int = int };
-
- return self.constInst(scope, src, .{
- .ty = ty,
- .val = Value.initPayload(&int_payload.base),
- });
-}
-
-pub fn constIntSigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: i64) !*Inst {
- const int_payload = try scope.arena().create(Value.Payload.Int_i64);
- int_payload.* = .{ .int = int };
-
- return self.constInst(scope, src, .{
- .ty = ty,
- .val = Value.initPayload(&int_payload.base),
- });
-}
-
-pub fn constIntBig(self: *Module, scope: *Scope, src: usize, ty: Type, big_int: BigIntConst) !*Inst {
- const val_payload = if (big_int.positive) blk: {
- if (big_int.to(u64)) |x| {
- return self.constIntUnsigned(scope, src, ty, x);
- } else |err| switch (err) {
- error.NegativeIntoUnsigned => unreachable,
- error.TargetTooSmall => {}, // handled below
- }
- const big_int_payload = try scope.arena().create(Value.Payload.IntBigPositive);
- big_int_payload.* = .{ .limbs = big_int.limbs };
- break :blk &big_int_payload.base;
- } else blk: {
- if (big_int.to(i64)) |x| {
- return self.constIntSigned(scope, src, ty, x);
- } else |err| switch (err) {
- error.NegativeIntoUnsigned => unreachable,
- error.TargetTooSmall => {}, // handled below
- }
- const big_int_payload = try scope.arena().create(Value.Payload.IntBigNegative);
- big_int_payload.* = .{ .limbs = big_int.limbs };
- break :blk &big_int_payload.base;
- };
-
- return self.constInst(scope, src, .{
- .ty = ty,
- .val = Value.initPayload(val_payload),
- });
-}
-
-pub fn createAnonymousDecl(
- self: *Module,
- scope: *Scope,
- decl_arena: *std.heap.ArenaAllocator,
- typed_value: TypedValue,
-) !*Decl {
- const name_index = self.getNextAnonNameIndex();
- const scope_decl = scope.decl().?;
- const name = try std.fmt.allocPrint(self.gpa, "{}__anon_{}", .{ scope_decl.name, name_index });
- defer self.gpa.free(name);
- const name_hash = scope.namespace().fullyQualifiedNameHash(name);
- const src_hash: std.zig.SrcHash = undefined;
- const new_decl = try self.createNewDecl(scope, name, scope_decl.src_index, name_hash, src_hash);
- const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
-
- decl_arena_state.* = decl_arena.state;
- new_decl.typed_value = .{
- .most_recent = .{
- .typed_value = typed_value,
- .arena = decl_arena_state,
- },
- };
- new_decl.analysis = .complete;
- new_decl.generation = self.generation;
-
-    // TODO: This generates the Decl into the machine code file if it is of a type that is non-zero size.
-    // We should be able to further improve the compiler to omit Decls which are only referenced
-    // at compile-time and not at runtime.
- if (typed_value.ty.hasCodeGenBits()) {
- try self.bin_file.allocateDeclIndexes(new_decl);
- try self.work_queue.writeItem(.{ .codegen_decl = new_decl });
- }
-
- return new_decl;
-}
-
-fn getNextAnonNameIndex(self: *Module) usize {
- return @atomicRmw(usize, &self.next_anon_name_index, .Add, 1, .Monotonic);
-}
-
-pub fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl {
- const namespace = scope.namespace();
- const name_hash = namespace.fullyQualifiedNameHash(ident_name);
- return self.decl_table.get(name_hash);
-}
-
-pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst {
- const scope_decl = scope.decl().?;
- try self.declareDeclDependency(scope_decl, decl);
- self.ensureDeclAnalyzed(decl) catch |err| {
- if (scope.cast(Scope.Block)) |block| {
- if (block.func) |func| {
- func.analysis = .dependency_failure;
- } else {
- block.decl.analysis = .dependency_failure;
- }
- } else {
- scope_decl.analysis = .dependency_failure;
- }
- return err;
- };
-
- const decl_tv = try decl.typedValue();
- if (decl_tv.val.tag() == .variable) {
- return self.analyzeVarRef(scope, src, decl_tv);
- }
- const ty = try self.simplePtrType(scope, src, decl_tv.ty, false, .One);
- const val_payload = try scope.arena().create(Value.Payload.DeclRef);
- val_payload.* = .{ .decl = decl };
-
- return self.constInst(scope, src, .{
- .ty = ty,
- .val = Value.initPayload(&val_payload.base),
- });
-}
-
-fn analyzeVarRef(self: *Module, scope: *Scope, src: usize, tv: TypedValue) InnerError!*Inst {
- const variable = tv.val.cast(Value.Payload.Variable).?.variable;
-
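-    // A const, non-extern variable is comptime-known, so reference its initializer
-    // value directly instead of emitting a runtime `varptr` instruction.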
- const ty = try self.simplePtrType(scope, src, tv.ty, variable.is_mutable, .One);
- if (!variable.is_mutable and !variable.is_extern) {
- const val_payload = try scope.arena().create(Value.Payload.RefVal);
- val_payload.* = .{ .val = variable.init };
- return self.constInst(scope, src, .{
- .ty = ty,
- .val = Value.initPayload(&val_payload.base),
- });
- }
-
- const b = try self.requireRuntimeBlock(scope, src);
- const inst = try b.arena.create(Inst.VarPtr);
- inst.* = .{
- .base = .{
- .tag = .varptr,
- .ty = ty,
- .src = src,
- },
- .variable = variable,
- };
- try b.instructions.append(self.gpa, &inst.base);
- return &inst.base;
-}
-
-pub fn analyzeDeref(self: *Module, scope: *Scope, src: usize, ptr: *Inst, ptr_src: usize) InnerError!*Inst {
- const elem_ty = switch (ptr.ty.zigTypeTag()) {
- .Pointer => ptr.ty.elemType(),
- else => return self.fail(scope, ptr_src, "expected pointer, found '{}'", .{ptr.ty}),
- };
- if (ptr.value()) |val| {
- return self.constInst(scope, src, .{
- .ty = elem_ty,
- .val = try val.pointerDeref(scope.arena()),
- });
- }
-
- const b = try self.requireRuntimeBlock(scope, src);
- return self.addUnOp(b, src, elem_ty, .load, ptr);
-}
-
-pub fn analyzeDeclRefByName(self: *Module, scope: *Scope, src: usize, decl_name: []const u8) InnerError!*Inst {
- const decl = self.lookupDeclName(scope, decl_name) orelse
- return self.fail(scope, src, "decl '{}' not found", .{decl_name});
- return self.analyzeDeclRef(scope, src, decl);
-}
-
-pub fn wantSafety(self: *Module, scope: *Scope) bool {
- // TODO take into account scope's safety overrides
- return switch (self.optimizeMode()) {
- .Debug => true,
- .ReleaseSafe => true,
- .ReleaseFast => false,
- .ReleaseSmall => false,
- };
-}
-
-pub fn analyzeIsNull(
- self: *Module,
- scope: *Scope,
- src: usize,
- operand: *Inst,
- invert_logic: bool,
-) InnerError!*Inst {
- if (operand.value()) |opt_val| {
- const is_null = opt_val.isNull();
- const bool_value = if (invert_logic) !is_null else is_null;
- return self.constBool(scope, src, bool_value);
- }
- const b = try self.requireRuntimeBlock(scope, src);
- const inst_tag: Inst.Tag = if (invert_logic) .isnonnull else .isnull;
- return self.addUnOp(b, src, Type.initTag(.bool), inst_tag, operand);
-}
-
-pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) InnerError!*Inst {
- return self.fail(scope, src, "TODO implement analysis of iserr", .{});
-}
-
-pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
- const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
- .Pointer => array_ptr.ty.elemType(),
- else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}),
- };
-
- var array_type = ptr_child;
- const elem_type = switch (ptr_child.zigTypeTag()) {
- .Array => ptr_child.elemType(),
- .Pointer => blk: {
- if (ptr_child.isSinglePointer()) {
- if (ptr_child.elemType().zigTypeTag() == .Array) {
- array_type = ptr_child.elemType();
- break :blk ptr_child.elemType().elemType();
- }
-
- return self.fail(scope, src, "slice of single-item pointer", .{});
- }
- break :blk ptr_child.elemType();
- },
- else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}),
- };
-
- const slice_sentinel = if (sentinel_opt) |sentinel| blk: {
- const casted = try self.coerce(scope, elem_type, sentinel);
- break :blk try self.resolveConstValue(scope, casted);
- } else null;
-
- var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
- var return_elem_type = elem_type;
- if (end_opt) |end| {
- if (end.value()) |end_val| {
- if (start.value()) |start_val| {
- const start_u64 = start_val.toUnsignedInt();
- const end_u64 = end_val.toUnsignedInt();
- if (start_u64 > end_u64) {
- return self.fail(scope, src, "out of bounds slice", .{});
- }
-
- const len = end_u64 - start_u64;
- const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
- array_type.sentinel()
- else
- slice_sentinel;
- return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type);
- return_ptr_size = .One;
- }
- }
- }
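-    // For example (illustrative): slicing with comptime-known bounds 1..3 produces
-    // a single-item pointer to a `[2]T` rather than a runtime `[]T`.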
- const return_type = try self.ptrType(
- scope,
- src,
- return_elem_type,
- if (end_opt == null) slice_sentinel else null,
- 0, // TODO alignment
- 0,
- 0,
- !ptr_child.isConstPtr(),
- ptr_child.isAllowzeroPtr(),
- ptr_child.isVolatilePtr(),
- return_ptr_size,
- );
-
- return self.fail(scope, src, "TODO implement analysis of slice", .{});
-}
-
-/// Asserts that lhs and rhs types are both numeric.
-pub fn cmpNumeric(
- self: *Module,
- scope: *Scope,
- src: usize,
- lhs: *Inst,
- rhs: *Inst,
- op: std.math.CompareOperator,
-) !*Inst {
- assert(lhs.ty.isNumeric());
- assert(rhs.ty.isNumeric());
-
- const lhs_ty_tag = lhs.ty.zigTypeTag();
- const rhs_ty_tag = rhs.ty.zigTypeTag();
-
- if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
- if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) {
- return self.fail(scope, src, "vector length mismatch: {} and {}", .{
- lhs.ty.arrayLen(),
- rhs.ty.arrayLen(),
- });
- }
- return self.fail(scope, src, "TODO implement support for vectors in cmpNumeric", .{});
- } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
- return self.fail(scope, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
- lhs.ty,
- rhs.ty,
- });
- }
-
- if (lhs.value()) |lhs_val| {
- if (rhs.value()) |rhs_val| {
- return self.constBool(scope, src, Value.compare(lhs_val, op, rhs_val));
- }
- }
-
- // TODO handle comparisons against lazy zero values
- // Some values can be compared against zero without being runtime known or without forcing
- // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
- // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
- // of this function if we don't need to.
-
- // It must be a runtime comparison.
- const b = try self.requireRuntimeBlock(scope, src);
- // For floats, emit a float comparison instruction.
- const lhs_is_float = switch (lhs_ty_tag) {
- .Float, .ComptimeFloat => true,
- else => false,
- };
- const rhs_is_float = switch (rhs_ty_tag) {
- .Float, .ComptimeFloat => true,
- else => false,
- };
- if (lhs_is_float and rhs_is_float) {
- // Implicit cast the smaller one to the larger one.
- const dest_type = x: {
- if (lhs_ty_tag == .ComptimeFloat) {
- break :x rhs.ty;
- } else if (rhs_ty_tag == .ComptimeFloat) {
- break :x lhs.ty;
- }
- if (lhs.ty.floatBits(self.getTarget()) >= rhs.ty.floatBits(self.getTarget())) {
- break :x lhs.ty;
- } else {
- break :x rhs.ty;
- }
- };
- const casted_lhs = try self.coerce(scope, dest_type, lhs);
- const casted_rhs = try self.coerce(scope, dest_type, rhs);
- return self.addBinOp(b, src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
- }
- // For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
- // For mixed signed and unsigned integers, implicit cast both operands to a signed
- // integer with + 1 bit.
- // For mixed floats and integers, extract the integer part from the float, cast that to
- // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
- // add/subtract 1.
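-    // Worked example (illustrative): for a runtime `u32` compared against a runtime
-    // `i16`, the destination must be signed, so the `u32` side needs 32 + 1 = 33 bits
-    // and the comparison is emitted over `i33`.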
- const lhs_is_signed = if (lhs.value()) |lhs_val|
- lhs_val.compareWithZero(.lt)
- else
- (lhs.ty.isFloat() or lhs.ty.isSignedInt());
- const rhs_is_signed = if (rhs.value()) |rhs_val|
- rhs_val.compareWithZero(.lt)
- else
- (rhs.ty.isFloat() or rhs.ty.isSignedInt());
- const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
-
- var dest_float_type: ?Type = null;
-
- var lhs_bits: usize = undefined;
- if (lhs.value()) |lhs_val| {
- if (lhs_val.isUndef())
- return self.constUndef(scope, src, Type.initTag(.bool));
- const is_unsigned = if (lhs_is_float) x: {
- var bigint_space: Value.BigIntSpace = undefined;
- var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
- defer bigint.deinit();
- const zcmp = lhs_val.orderAgainstZero();
- if (lhs_val.floatHasFraction()) {
- switch (op) {
- .eq => return self.constBool(scope, src, false),
- .neq => return self.constBool(scope, src, true),
- else => {},
- }
- if (zcmp == .lt) {
- try bigint.addScalar(bigint.toConst(), -1);
- } else {
- try bigint.addScalar(bigint.toConst(), 1);
- }
- }
- lhs_bits = bigint.toConst().bitCountTwosComp();
- break :x (zcmp != .lt);
- } else x: {
- lhs_bits = lhs_val.intBitCountTwosComp();
- break :x (lhs_val.orderAgainstZero() != .lt);
- };
- lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
- } else if (lhs_is_float) {
- dest_float_type = lhs.ty;
- } else {
- const int_info = lhs.ty.intInfo(self.getTarget());
- lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
- }
-
- var rhs_bits: usize = undefined;
- if (rhs.value()) |rhs_val| {
- if (rhs_val.isUndef())
- return self.constUndef(scope, src, Type.initTag(.bool));
- const is_unsigned = if (rhs_is_float) x: {
- var bigint_space: Value.BigIntSpace = undefined;
- var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
- defer bigint.deinit();
- const zcmp = rhs_val.orderAgainstZero();
- if (rhs_val.floatHasFraction()) {
- switch (op) {
- .eq => return self.constBool(scope, src, false),
- .neq => return self.constBool(scope, src, true),
- else => {},
- }
- if (zcmp == .lt) {
- try bigint.addScalar(bigint.toConst(), -1);
- } else {
- try bigint.addScalar(bigint.toConst(), 1);
- }
- }
- rhs_bits = bigint.toConst().bitCountTwosComp();
- break :x (zcmp != .lt);
- } else x: {
- rhs_bits = rhs_val.intBitCountTwosComp();
- break :x (rhs_val.orderAgainstZero() != .lt);
- };
- rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
- } else if (rhs_is_float) {
- dest_float_type = rhs.ty;
- } else {
- const int_info = rhs.ty.intInfo(self.getTarget());
- rhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
- }
-
- const dest_type = if (dest_float_type) |ft| ft else blk: {
- const max_bits = std.math.max(lhs_bits, rhs_bits);
- const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
- error.Overflow => return self.fail(scope, src, "{} exceeds maximum integer bit count", .{max_bits}),
- };
- break :blk try self.makeIntType(scope, dest_int_is_signed, casted_bits);
- };
- const casted_lhs = try self.coerce(scope, dest_type, lhs);
- const casted_rhs = try self.coerce(scope, dest_type, rhs);
-
- return self.addBinOp(b, src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
-}
-
-fn wrapOptional(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
- if (inst.value()) |val| {
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
-
- const b = try self.requireRuntimeBlock(scope, inst.src);
- return self.addUnOp(b, inst.src, dest_type, .wrap_optional, inst);
-}
-
-fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
- if (signed) {
- const int_payload = try scope.arena().create(Type.Payload.IntSigned);
- int_payload.* = .{ .bits = bits };
- return Type.initPayload(&int_payload.base);
- } else {
- const int_payload = try scope.arena().create(Type.Payload.IntUnsigned);
- int_payload.* = .{ .bits = bits };
- return Type.initPayload(&int_payload.base);
- }
-}
-
-pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Type {
- if (instructions.len == 0)
- return Type.initTag(.noreturn);
-
- if (instructions.len == 1)
- return instructions[0].ty;
-
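-    // Illustrative outcomes: {u8, u16} resolves to u16 and {f32, f64} to f64, while
-    // a `noreturn` or `undefined` peer is skipped in favor of the other type.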
- var prev_inst = instructions[0];
- for (instructions[1..]) |next_inst| {
- if (next_inst.ty.eql(prev_inst.ty))
- continue;
- if (next_inst.ty.zigTypeTag() == .NoReturn)
- continue;
- if (prev_inst.ty.zigTypeTag() == .NoReturn) {
- prev_inst = next_inst;
- continue;
- }
- if (next_inst.ty.zigTypeTag() == .Undefined)
- continue;
- if (prev_inst.ty.zigTypeTag() == .Undefined) {
- prev_inst = next_inst;
- continue;
- }
- if (prev_inst.ty.isInt() and
- next_inst.ty.isInt() and
- prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
- {
- if (prev_inst.ty.intInfo(self.getTarget()).bits < next_inst.ty.intInfo(self.getTarget()).bits) {
- prev_inst = next_inst;
- }
- continue;
- }
- if (prev_inst.ty.isFloat() and next_inst.ty.isFloat()) {
- if (prev_inst.ty.floatBits(self.getTarget()) < next_inst.ty.floatBits(self.getTarget())) {
- prev_inst = next_inst;
- }
- continue;
- }
-
- // TODO error notes pointing out each type
- return self.fail(scope, next_inst.src, "incompatible types: '{}' and '{}'", .{ prev_inst.ty, next_inst.ty });
- }
-
- return prev_inst.ty;
-}
-
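-/// Implicit coercion. Tries, in order: exact type match, in-memory bitcast,
-/// undefined-to-anything, `null` to `?T`, `T` to `?T`, `*[N]T` to `[]T`,
-/// comptime-known number coercion, then integer and float widening.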
-pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
- // If the types are the same, we can return the operand.
- if (dest_type.eql(inst.ty))
- return inst;
-
- const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
- if (in_memory_result == .ok) {
- return self.bitcast(scope, dest_type, inst);
- }
-
- // undefined to anything
- if (inst.value()) |val| {
- if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) {
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
- }
- assert(inst.ty.zigTypeTag() != .Undefined);
-
- // null to ?T
- if (dest_type.zigTypeTag() == .Optional and inst.ty.zigTypeTag() == .Null) {
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
- }
-
- // T to ?T
- if (dest_type.zigTypeTag() == .Optional) {
- var buf: Type.Payload.PointerSimple = undefined;
- const child_type = dest_type.optionalChild(&buf);
- if (child_type.eql(inst.ty)) {
- return self.wrapOptional(scope, dest_type, inst);
- } else if (try self.coerceNum(scope, child_type, inst)) |some| {
- return self.wrapOptional(scope, dest_type, some);
- }
- }
-
- // *[N]T to []T
- if (inst.ty.isSinglePointer() and dest_type.isSlice() and
- (!inst.ty.isConstPtr() or dest_type.isConstPtr()))
- {
- const array_type = inst.ty.elemType();
- const dst_elem_type = dest_type.elemType();
- if (array_type.zigTypeTag() == .Array and
- coerceInMemoryAllowed(dst_elem_type, array_type.elemType()) == .ok)
- {
- return self.coerceArrayPtrToSlice(scope, dest_type, inst);
- }
- }
-
- // comptime known number to other number
- if (try self.coerceNum(scope, dest_type, inst)) |some|
- return some;
-
- // integer widening
- if (inst.ty.zigTypeTag() == .Int and dest_type.zigTypeTag() == .Int) {
- assert(inst.value() == null); // handled above
-
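-        // For example (illustrative): `u8` -> `u16` widens losslessly; with mixed
-        // signedness, `u8` -> `i9` works because `i9` holds every `u8` value.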
- const src_info = inst.ty.intInfo(self.getTarget());
- const dst_info = dest_type.intInfo(self.getTarget());
- if ((src_info.signed == dst_info.signed and dst_info.bits >= src_info.bits) or
-            // small enough unsigned ints can be cast to large enough signed ints
-            (!src_info.signed and dst_info.signed and dst_info.bits > src_info.bits))
- {
- const b = try self.requireRuntimeBlock(scope, inst.src);
- return self.addUnOp(b, inst.src, dest_type, .intcast, inst);
- }
- }
-
- // float widening
- if (inst.ty.zigTypeTag() == .Float and dest_type.zigTypeTag() == .Float) {
- assert(inst.value() == null); // handled above
-
- const src_bits = inst.ty.floatBits(self.getTarget());
- const dst_bits = dest_type.floatBits(self.getTarget());
- if (dst_bits >= src_bits) {
- const b = try self.requireRuntimeBlock(scope, inst.src);
- return self.addUnOp(b, inst.src, dest_type, .floatcast, inst);
- }
- }
-
- return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
-}
-
-pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*Inst {
- const val = inst.value() orelse return null;
- const src_zig_tag = inst.ty.zigTypeTag();
- const dst_zig_tag = dest_type.zigTypeTag();
-
- if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
- if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- if (val.floatHasFraction()) {
-                    return self.fail(scope, inst.src, "fractional component prevents float value {} from being cast to type '{}'", .{ val, dest_type });
- }
- return self.fail(scope, inst.src, "TODO float to int", .{});
- } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- if (!val.intFitsInType(dest_type, self.getTarget())) {
- return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
- }
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
- } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
- if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
- const res = val.floatCast(scope.arena(), dest_type, self.getTarget()) catch |err| switch (err) {
- error.Overflow => return self.fail(
- scope,
- inst.src,
- "cast of value {} to type '{}' loses information",
- .{ val, dest_type },
- ),
- error.OutOfMemory => return error.OutOfMemory,
- };
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = res });
- } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
- return self.fail(scope, inst.src, "TODO int to float", .{});
- }
- }
- return null;
-}
-
-pub fn storePtr(self: *Module, scope: *Scope, src: usize, ptr: *Inst, uncasted_value: *Inst) !*Inst {
- if (ptr.ty.isConstPtr())
- return self.fail(scope, src, "cannot assign to constant", .{});
-
- const elem_ty = ptr.ty.elemType();
- const value = try self.coerce(scope, elem_ty, uncasted_value);
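-    // If the element type has only one possible value, the store has no runtime
-    // effect and is elided to a void constant.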
- if (elem_ty.onePossibleValue() != null)
- return self.constVoid(scope, src);
-
- // TODO handle comptime pointer writes
- // TODO handle if the element type requires comptime
-
- const b = try self.requireRuntimeBlock(scope, src);
- return self.addBinOp(b, src, Type.initTag(.void), .store, ptr, value);
-}
-
-pub fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
- if (inst.value()) |val| {
- // Keep the comptime Value representation; take the new type.
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
- // TODO validate the type size and other compile errors
- const b = try self.requireRuntimeBlock(scope, inst.src);
- return self.addUnOp(b, inst.src, dest_type, .bitcast, inst);
-}
-
-fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
- if (inst.value()) |val| {
- // The comptime Value representation is compatible with both types.
- return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
- }
- return self.fail(scope, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{});
-}
-
-fn failCObj(mod: *Module, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError {
- @setCold(true);
- const err_msg = try ErrorMsg.create(mod.gpa, 0, "unable to build C object: " ++ format, args);
- return mod.failCObjWithOwnedErrorMsg(c_object, err_msg);
-}
-
-fn failCObjWithOwnedErrorMsg(mod: *Module, c_object: *CObject, err_msg: *ErrorMsg) InnerError {
- {
- errdefer err_msg.destroy(mod.gpa);
- try mod.failed_c_objects.ensureCapacity(mod.gpa, mod.failed_c_objects.items().len + 1);
- }
- mod.failed_c_objects.putAssumeCapacityNoClobber(c_object, err_msg);
- c_object.status = .{ .failure = "" };
- return error.AnalysisFail;
-}
-
-pub fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError {
- @setCold(true);
- const err_msg = try ErrorMsg.create(self.gpa, src, format, args);
- return self.failWithOwnedErrorMsg(scope, src, err_msg);
-}
-
-pub fn failTok(
- self: *Module,
- scope: *Scope,
- token_index: ast.TokenIndex,
- comptime format: []const u8,
- args: anytype,
-) InnerError {
- @setCold(true);
- const src = scope.tree().token_locs[token_index].start;
- return self.fail(scope, src, format, args);
-}
-
-pub fn failNode(
- self: *Module,
- scope: *Scope,
- ast_node: *ast.Node,
- comptime format: []const u8,
- args: anytype,
-) InnerError {
- @setCold(true);
- const src = scope.tree().token_locs[ast_node.firstToken()].start;
- return self.fail(scope, src, format, args);
-}
-
-fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *ErrorMsg) InnerError {
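-    // The inner block scopes the errdefer: `err_msg` is destroyed only if reserving
-    // capacity fails; once past the block, ownership passes to the table below.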
- {
- errdefer err_msg.destroy(self.gpa);
- try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
- try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
- }
- switch (scope.tag) {
- .decl => {
- const decl = scope.cast(Scope.DeclAnalysis).?.decl;
- decl.analysis = .sema_failure;
- decl.generation = self.generation;
- self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg);
- },
- .block => {
- const block = scope.cast(Scope.Block).?;
- if (block.func) |func| {
- func.analysis = .sema_failure;
- } else {
- block.decl.analysis = .sema_failure;
- block.decl.generation = self.generation;
- }
- self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg);
- },
- .gen_zir => {
- const gen_zir = scope.cast(Scope.GenZIR).?;
- gen_zir.decl.analysis = .sema_failure;
- gen_zir.decl.generation = self.generation;
- self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
- },
- .local_val => {
- const gen_zir = scope.cast(Scope.LocalVal).?.gen_zir;
- gen_zir.decl.analysis = .sema_failure;
- gen_zir.decl.generation = self.generation;
- self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
- },
- .local_ptr => {
- const gen_zir = scope.cast(Scope.LocalPtr).?.gen_zir;
- gen_zir.decl.analysis = .sema_failure;
- gen_zir.decl.generation = self.generation;
- self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
- },
- .zir_module => {
- const zir_module = scope.cast(Scope.ZIRModule).?;
- zir_module.status = .loaded_sema_failure;
- self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
- },
- .none => unreachable,
- .file => unreachable,
- .container => unreachable,
- }
- return error.AnalysisFail;
-}
-
-const InMemoryCoercionResult = enum {
- ok,
- no_match,
-};
-
-fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult {
- if (dest_type.eql(src_type))
- return .ok;
-
- // TODO: implement more of this function
-
- return .no_match;
-}
-
-pub const ErrorMsg = struct {
- byte_offset: usize,
- msg: []const u8,
-
- pub fn create(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !*ErrorMsg {
- const self = try gpa.create(ErrorMsg);
- errdefer gpa.destroy(self);
- self.* = try init(gpa, byte_offset, format, args);
- return self;
- }
-
- /// Assumes the ErrorMsg struct and msg were both allocated with allocator.
- pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void {
- self.deinit(gpa);
- gpa.destroy(self);
+ /// Assumes the ErrorMsg struct and msg were both allocated with allocator.
+ pub fn destroy(self: *ErrorMsg, gpa: *Allocator) void {
+ self.deinit(gpa);
+ gpa.destroy(self);
}
pub fn init(gpa: *Allocator, byte_offset: usize, comptime format: []const u8, args: anytype) !ErrorMsg {
@@ -4066,375 +1318,6 @@ pub const ErrorMsg = struct {
}
};
-fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
- return @bitCast(u128, a) == @bitCast(u128, b);
-}
-
-pub fn intAdd(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
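-    // max(lhs, rhs) limbs plus one extra leaves room for a possible carry out of
-    // the most significant limb.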
- const limbs = try allocator.alloc(
- std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.add(lhs_bigint, rhs_bigint);
- const result_limbs = result_bigint.limbs[0..result_bigint.len];
-
- const val_payload = if (result_bigint.positive) blk: {
- const val_payload = try allocator.create(Value.Payload.IntBigPositive);
- val_payload.* = .{ .limbs = result_limbs };
- break :blk &val_payload.base;
- } else blk: {
- const val_payload = try allocator.create(Value.Payload.IntBigNegative);
- val_payload.* = .{ .limbs = result_limbs };
- break :blk &val_payload.base;
- };
-
- return Value.initPayload(val_payload);
-}
-
-pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
- // TODO is this a performance issue? maybe we should try the operation without
- // resorting to BigInt first.
- var lhs_space: Value.BigIntSpace = undefined;
- var rhs_space: Value.BigIntSpace = undefined;
- const lhs_bigint = lhs.toBigInt(&lhs_space);
- const rhs_bigint = rhs.toBigInt(&rhs_space);
- const limbs = try allocator.alloc(
- std.math.big.Limb,
- std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
- );
- var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
- result_bigint.sub(lhs_bigint, rhs_bigint);
- const result_limbs = result_bigint.limbs[0..result_bigint.len];
-
- const val_payload = if (result_bigint.positive) blk: {
- const val_payload = try allocator.create(Value.Payload.IntBigPositive);
- val_payload.* = .{ .limbs = result_limbs };
- break :blk &val_payload.base;
- } else blk: {
- const val_payload = try allocator.create(Value.Payload.IntBigNegative);
- val_payload.* = .{ .limbs = result_limbs };
- break :blk &val_payload.base;
- };
-
- return Value.initPayload(val_payload);
-}
-
-pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
- var bit_count = switch (float_type.tag()) {
- .comptime_float => 128,
- else => float_type.floatBits(self.getTarget()),
- };
-
- const allocator = scope.arena();
- const val_payload = switch (bit_count) {
- 16 => {
- return self.fail(scope, src, "TODO Implement addition for soft floats", .{});
- },
- 32 => blk: {
- const lhs_val = lhs.toFloat(f32);
- const rhs_val = rhs.toFloat(f32);
- const val_payload = try allocator.create(Value.Payload.Float_32);
- val_payload.* = .{ .val = lhs_val + rhs_val };
- break :blk &val_payload.base;
- },
- 64 => blk: {
- const lhs_val = lhs.toFloat(f64);
- const rhs_val = rhs.toFloat(f64);
- const val_payload = try allocator.create(Value.Payload.Float_64);
- val_payload.* = .{ .val = lhs_val + rhs_val };
- break :blk &val_payload.base;
- },
- 128 => {
- return self.fail(scope, src, "TODO Implement addition for big floats", .{});
- },
- else => unreachable,
- };
-
- return Value.initPayload(val_payload);
-}
-
-pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
- var bit_count = switch (float_type.tag()) {
- .comptime_float => 128,
- else => float_type.floatBits(self.getTarget()),
- };
-
- const allocator = scope.arena();
- const val_payload = switch (bit_count) {
- 16 => {
- return self.fail(scope, src, "TODO Implement substraction for soft floats", .{});
- },
- 32 => blk: {
- const lhs_val = lhs.toFloat(f32);
- const rhs_val = rhs.toFloat(f32);
- const val_payload = try allocator.create(Value.Payload.Float_32);
- val_payload.* = .{ .val = lhs_val - rhs_val };
- break :blk &val_payload.base;
- },
- 64 => blk: {
- const lhs_val = lhs.toFloat(f64);
- const rhs_val = rhs.toFloat(f64);
- const val_payload = try allocator.create(Value.Payload.Float_64);
- val_payload.* = .{ .val = lhs_val - rhs_val };
- break :blk &val_payload.base;
- },
- 128 => {
- return self.fail(scope, src, "TODO Implement substraction for big floats", .{});
- },
- else => unreachable,
- };
-
- return Value.initPayload(val_payload);
-}
-
-pub fn simplePtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) Allocator.Error!Type {
- if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
- return Type.initTag(.const_slice_u8);
- }
- // TODO stage1 type inference bug
- const T = Type.Tag;
-
- const type_payload = try scope.arena().create(Type.Payload.PointerSimple);
- type_payload.* = .{
- .base = .{
- .tag = switch (size) {
- .One => if (mutable) T.single_mut_pointer else T.single_const_pointer,
- .Many => if (mutable) T.many_mut_pointer else T.many_const_pointer,
- .C => if (mutable) T.c_mut_pointer else T.c_const_pointer,
- .Slice => if (mutable) T.mut_slice else T.const_slice,
- },
- },
- .pointee_type = elem_ty,
- };
- return Type.initPayload(&type_payload.base);
-}
-
-pub fn ptrType(
- self: *Module,
- scope: *Scope,
- src: usize,
- elem_ty: Type,
- sentinel: ?Value,
- @"align": u32,
- bit_offset: u16,
- host_size: u16,
- mutable: bool,
- @"allowzero": bool,
- @"volatile": bool,
- size: std.builtin.TypeInfo.Pointer.Size,
-) Allocator.Error!Type {
- assert(host_size == 0 or bit_offset < host_size * 8);
-
- // TODO check if type can be represented by simplePtrType
- const type_payload = try scope.arena().create(Type.Payload.Pointer);
- type_payload.* = .{
- .pointee_type = elem_ty,
- .sentinel = sentinel,
- .@"align" = @"align",
- .bit_offset = bit_offset,
- .host_size = host_size,
- .@"allowzero" = @"allowzero",
- .mutable = mutable,
- .@"volatile" = @"volatile",
- .size = size,
- };
- return Type.initPayload(&type_payload.base);
-}
-
-pub fn optionalType(self: *Module, scope: *Scope, child_type: Type) Allocator.Error!Type {
- return Type.initPayload(switch (child_type.tag()) {
- .single_const_pointer => blk: {
- const payload = try scope.arena().create(Type.Payload.PointerSimple);
- payload.* = .{
- .base = .{ .tag = .optional_single_const_pointer },
- .pointee_type = child_type.elemType(),
- };
- break :blk &payload.base;
- },
- .single_mut_pointer => blk: {
- const payload = try scope.arena().create(Type.Payload.PointerSimple);
- payload.* = .{
- .base = .{ .tag = .optional_single_mut_pointer },
- .pointee_type = child_type.elemType(),
- };
- break :blk &payload.base;
- },
- else => blk: {
- const payload = try scope.arena().create(Type.Payload.Optional);
- payload.* = .{
- .child_type = child_type,
- };
- break :blk &payload.base;
- },
- });
-}
-
-pub fn arrayType(self: *Module, scope: *Scope, len: u64, sentinel: ?Value, elem_type: Type) Allocator.Error!Type {
- if (elem_type.eql(Type.initTag(.u8))) {
- if (sentinel) |some| {
- if (some.eql(Value.initTag(.zero))) {
- const payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
- payload.* = .{
- .len = len,
- };
- return Type.initPayload(&payload.base);
- }
- } else {
- const payload = try scope.arena().create(Type.Payload.Array_u8);
- payload.* = .{
- .len = len,
- };
- return Type.initPayload(&payload.base);
- }
- }
-
- if (sentinel) |some| {
- const payload = try scope.arena().create(Type.Payload.ArraySentinel);
- payload.* = .{
- .len = len,
- .sentinel = some,
- .elem_type = elem_type,
- };
- return Type.initPayload(&payload.base);
- }
-
- const payload = try scope.arena().create(Type.Payload.Array);
- payload.* = .{
- .len = len,
- .elem_type = elem_type,
- };
- return Type.initPayload(&payload.base);
-}
-
-pub fn errorUnionType(self: *Module, scope: *Scope, error_set: Type, payload: Type) Allocator.Error!Type {
- assert(error_set.zigTypeTag() == .ErrorSet);
- if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
- return Type.initTag(.anyerror_void_error_union);
- }
-
- const result = try scope.arena().create(Type.Payload.ErrorUnion);
- result.* = .{
- .error_set = error_set,
- .payload = payload,
- };
- return Type.initPayload(&result.base);
-}
-
-pub fn anyframeType(self: *Module, scope: *Scope, return_type: Type) Allocator.Error!Type {
- const result = try scope.arena().create(Type.Payload.AnyFrame);
- result.* = .{
- .return_type = return_type,
- };
- return Type.initPayload(&result.base);
-}
-
-pub fn dumpInst(self: *Module, scope: *Scope, inst: *Inst) void {
- const zir_module = scope.namespace();
- const source = zir_module.getSource(self) catch @panic("dumpInst failed to get source");
- const loc = std.zig.findLineColumn(source, inst.src);
- if (inst.tag == .constant) {
- std.debug.print("constant ty={} val={} src={}:{}:{}\n", .{
- inst.ty,
- inst.castTag(.constant).?.val,
- zir_module.subFilePath(),
- loc.line + 1,
- loc.column + 1,
- });
- } else if (inst.deaths == 0) {
- std.debug.print("{} ty={} src={}:{}:{}\n", .{
- @tagName(inst.tag),
- inst.ty,
- zir_module.subFilePath(),
- loc.line + 1,
- loc.column + 1,
- });
- } else {
- std.debug.print("{} ty={} deaths={b} src={}:{}:{}\n", .{
- @tagName(inst.tag),
- inst.ty,
- inst.deaths,
- zir_module.subFilePath(),
- loc.line + 1,
- loc.column + 1,
- });
- }
-}
-
-pub const PanicId = enum {
- unreach,
- unwrap_null,
-};
-
-pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
- const block_inst = try parent_block.arena.create(Inst.Block);
- block_inst.* = .{
- .base = .{
- .tag = Inst.Block.base_tag,
- .ty = Type.initTag(.void),
- .src = ok.src,
- },
- .body = .{
- .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the condbr.
- },
- };
-
- const ok_body: ir.Body = .{
- .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the brvoid.
- };
- const brvoid = try parent_block.arena.create(Inst.BrVoid);
- brvoid.* = .{
- .base = .{
- .tag = .brvoid,
- .ty = Type.initTag(.noreturn),
- .src = ok.src,
- },
- .block = block_inst,
- };
- ok_body.instructions[0] = &brvoid.base;
-
- var fail_block: Scope.Block = .{
- .parent = parent_block,
- .func = parent_block.func,
- .decl = parent_block.decl,
- .instructions = .{},
- .arena = parent_block.arena,
- .is_comptime = parent_block.is_comptime,
- };
- defer fail_block.instructions.deinit(mod.gpa);
-
- _ = try mod.safetyPanic(&fail_block, ok.src, panic_id);
-
- const fail_body: ir.Body = .{ .instructions = try parent_block.arena.dupe(*Inst, fail_block.instructions.items) };
-
- const condbr = try parent_block.arena.create(Inst.CondBr);
- condbr.* = .{
- .base = .{
- .tag = .condbr,
- .ty = Type.initTag(.noreturn),
- .src = ok.src,
- },
- .condition = ok,
- .then_body = ok_body,
- .else_body = fail_body,
- };
- block_inst.body.instructions[0] = &condbr.base;
-
- try parent_block.instructions.append(mod.gpa, &block_inst.base);
-}
-
-pub fn safetyPanic(mod: *Module, block: *Scope.Block, src: usize, panic_id: PanicId) !*Inst {
- // TODO Once we have a panic function to call, call it here instead of breakpoint.
- _ = try mod.addNoOp(block, src, Type.initTag(.void), .breakpoint);
- return mod.addNoOp(block, src, Type.initTag(.noreturn), .unreach);
-}
-
pub const FileExt = enum {
c,
cpp,
src-self-hosted/test.zig
@@ -549,7 +549,7 @@ pub const TestContext = struct {
update_node.estimated_total_items = 5;
var emit_node = update_node.start("emit", null);
emit_node.activate();
- var new_zir_module = try zir.emit(allocator, module);
+ var new_zir_module = try zir.emit(allocator, module.bin_file.options.zig_module.?);
defer new_zir_module.deinit(allocator);
emit_node.end();
src-self-hosted/type.zig
@@ -3,7 +3,7 @@ const Value = @import("value.zig").Value;
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Target = std.Target;
-const Module = @import("Module.zig");
+const Module = @import("ZigModule.zig");
/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication.
/// It's important for this type to be small.
src-self-hosted/value.zig
@@ -6,7 +6,7 @@ const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Target = std.Target;
const Allocator = std.mem.Allocator;
-const Module = @import("Module.zig");
+const Module = @import("ZigModule.zig");
/// This is the raw data, with no bookkeeping, no memory awareness,
/// no de-duplication, and no type system awareness.
src-self-hosted/ZigModule.zig
@@ -0,0 +1,3238 @@
+//! TODO This is going to get renamed from ZigModule to Module (but first we have to rename
+//! Module to Compilation).
+const Module = @This();
+const Compilation = @import("Module.zig");
+
+const std = @import("std");
+const mem = std.mem;
+const Allocator = std.mem.Allocator;
+const ArrayListUnmanaged = std.ArrayListUnmanaged;
+const Value = @import("value.zig").Value;
+const Type = @import("type.zig").Type;
+const TypedValue = @import("TypedValue.zig");
+const assert = std.debug.assert;
+const log = std.log.scoped(.module);
+const BigIntConst = std.math.big.int.Const;
+const BigIntMutable = std.math.big.int.Mutable;
+const Target = std.Target;
+const Package = @import("Package.zig");
+const link = @import("link.zig");
+const ir = @import("ir.zig");
+const zir = @import("zir.zig");
+const Inst = ir.Inst;
+const Body = ir.Body;
+const ast = std.zig.ast;
+const trace = @import("tracy.zig").trace;
+const astgen = @import("astgen.zig");
+const zir_sema = @import("zir_sema.zig");
+
+/// General-purpose allocator. Used for both temporary and long-term storage.
+gpa: *Allocator,
+comp: *Compilation,
+
+/// Where our incremental compilation metadata serialization will go.
+zig_cache_artifact_directory: Compilation.Directory,
+/// Pointer to externally managed resource. `null` if there is no zig file being compiled.
+root_pkg: *Package,
+/// Module owns this resource.
+/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
+root_scope: *Scope,
+/// It's rare for a decl to be exported, so we save memory by having a sparse map of
+/// Decl pointers to details about them being exported.
+/// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
+decl_exports: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
+/// We track which export is associated with the given symbol name for quick
+/// detection of symbol collisions.
+symbol_exports: std.StringArrayHashMapUnmanaged(*Export) = .{},
+/// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
+/// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
+/// is performing the export of another Decl.
+/// This table owns the Export memory.
+export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
+/// Maps fully qualified namespaced names to the Decl struct for them.
+decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
+/// We optimize memory usage for a compilation with no compile errors by storing the
+/// error messages and mapping outside of `Decl`.
+/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
+/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
+/// a Decl can have a failed_decls entry but have analysis status of success.
+failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *Compilation.ErrorMsg) = .{},
+/// Using a map here for consistency with the other fields.
+/// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator.
+failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *Compilation.ErrorMsg) = .{},
+/// Using a map here for consistency with the other fields.
+/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
+failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *Compilation.ErrorMsg) = .{},
+
+next_anon_name_index: usize = 0,
+
+/// Candidates for deletion. After a semantic analysis update completes, this list
+/// contains Decls that need to be deleted if they end up having no references to them.
+deletion_set: ArrayListUnmanaged(*Decl) = .{},
+
+/// Error tags and their values; tag names are duped with mod.gpa.
+global_error_set: std.StringHashMapUnmanaged(u16) = .{},
+
+/// Incrementing integer used to compare against the corresponding Decl
+/// field to determine whether a Decl's status applies to an ongoing update, or a
+/// previous analysis.
+generation: u32 = 0,
+
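+// A minimal sketch of how the two export tables relate, assuming illustrative
+// Decls `a` (whose body performs an export) and `b` (the Decl being exported):
+//
+//     const owned = mod.export_owners.get(a).?; // `a` owns this Export memory
+//     const seen = mod.decl_exports.get(b).?;   // same pointers, keyed by exportee
+//     assert(owned[0].owner_decl == a);
+//     assert(owned[0].exported_decl == b);
+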
+pub const Export = struct {
+ options: std.builtin.ExportOptions,
+ /// Byte offset into the file that contains the export directive.
+ src: usize,
+ /// Represents the position of the export, if any, in the output file.
+ link: link.File.Elf.Export,
+ /// The Decl that performs the export. Note that this is *not* the Decl being exported.
+ owner_decl: *Decl,
+ /// The Decl being exported. Note this is *not* the Decl performing the export.
+ exported_decl: *Decl,
+ status: enum {
+ in_progress,
+ failed,
+ /// Indicates that the failure was due to a temporary issue, such as an I/O error
+ /// when writing to the output file. Retrying the export may succeed.
+ failed_retryable,
+ complete,
+ },
+};
+
+pub const Decl = struct {
+    /// This name is relative to the containing namespace of the decl. It uses null termination
+    /// to save bytes, since there can be a lot of decls in a compilation. The null byte is not allowed
+ /// in symbol names, because executable file formats use null-terminated strings for symbol names.
+ /// All Decls have names, even values that are not bound to a zig namespace. This is necessary for
+ /// mapping them to an address in the output file.
+ /// Memory owned by this decl, using Module's allocator.
+ name: [*:0]const u8,
+ /// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`.
+ /// Reference to externally owned memory.
+ scope: *Scope,
+ /// The AST Node decl index or ZIR Inst index that contains this declaration.
+ /// Must be recomputed when the corresponding source file is modified.
+ src_index: usize,
+ /// The most recent value of the Decl after a successful semantic analysis.
+ typed_value: union(enum) {
+ never_succeeded: void,
+ most_recent: TypedValue.Managed,
+ },
+ /// Represents the "shallow" analysis status. For example, for decls that are functions,
+ /// the function type is analyzed with this set to `in_progress`, however, the semantic
+ /// analysis of the function body is performed with this value set to `success`. Functions
+ /// have their own analysis status field.
+ analysis: enum {
+ /// This Decl corresponds to an AST Node that has not been referenced yet, and therefore
+ /// because of Zig's lazy declaration analysis, it will remain unanalyzed until referenced.
+ unreferenced,
+ /// Semantic analysis for this Decl is running right now. This state detects dependency loops.
+ in_progress,
+ /// This Decl might be OK but it depends on another one which did not successfully complete
+ /// semantic analysis.
+ dependency_failure,
+ /// Semantic analysis failure.
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ sema_failure,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ /// This indicates the failure was something like running out of disk space,
+ /// and attempting semantic analysis again may succeed.
+ sema_failure_retryable,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ codegen_failure,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ /// This indicates the failure was something like running out of disk space,
+ /// and attempting codegen again may succeed.
+ codegen_failure_retryable,
+ /// Everything is done. During an update, this Decl may be out of date, depending
+ /// on its dependencies. The `generation` field can be used to determine if this
+ /// completion status occurred before or after a given update.
+ complete,
+ /// A Module update is in progress, and this Decl has been flagged as being known
+ /// to require re-analysis.
+ outdated,
+ },
+    /// This flag is set when this Decl is added to the `deletion_set`, and cleared
+ /// when removed.
+ deletion_flag: bool,
+ /// Whether the corresponding AST decl has a `pub` keyword.
+ is_pub: bool,
+
+ /// An integer that can be checked against the corresponding incrementing
+ /// generation field of Module. This is used to determine whether `complete` status
+ /// represents pre- or post- re-analysis.
+ generation: u32,
+
+ /// Represents the position of the code in the output file.
+ /// This is populated regardless of semantic analysis and code generation.
+ link: link.File.LinkBlock,
+
+ /// Represents the function in the linked output file, if the `Decl` is a function.
+ /// This is stored here and not in `Fn` because `Decl` survives across updates but
+ /// `Fn` does not.
+ /// TODO Look into making `Fn` a longer lived structure and moving this field there
+ /// to save on memory usage.
+ fn_link: link.File.LinkFn,
+
+ contents_hash: std.zig.SrcHash,
+
+ /// The shallow set of other decls whose typed_value could possibly change if this Decl's
+ /// typed_value is modified.
+ dependants: DepsTable = .{},
+ /// The shallow set of other decls whose typed_value changing indicates that this Decl's
+ /// typed_value may need to be regenerated.
+ dependencies: DepsTable = .{},
+
+    /// The reason this is not `std.AutoArrayHashMapUnmanaged` is to work around the
+    /// stage1 compiler error: `error: struct 'Module.Decl' depends on itself`
+ pub const DepsTable = std.ArrayHashMapUnmanaged(*Decl, void, std.array_hash_map.getAutoHashFn(*Decl), std.array_hash_map.getAutoEqlFn(*Decl), false);
+
+ pub fn destroy(self: *Decl, gpa: *Allocator) void {
+ gpa.free(mem.spanZ(self.name));
+ if (self.typedValueManaged()) |tvm| {
+ tvm.deinit(gpa);
+ }
+ self.dependants.deinit(gpa);
+ self.dependencies.deinit(gpa);
+ gpa.destroy(self);
+ }
+
+ pub fn src(self: Decl) usize {
+ switch (self.scope.tag) {
+ .container => {
+ const container = @fieldParentPtr(Scope.Container, "base", self.scope);
+ const tree = container.file_scope.contents.tree;
+                // TODO Container should have its own decls()
+ const decl_node = tree.root_node.decls()[self.src_index];
+ return tree.token_locs[decl_node.firstToken()].start;
+ },
+ .zir_module => {
+ const zir_module = @fieldParentPtr(Scope.ZIRModule, "base", self.scope);
+ const module = zir_module.contents.module;
+ const src_decl = module.decls[self.src_index];
+ return src_decl.inst.src;
+ },
+ .file, .block => unreachable,
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ pub fn fullyQualifiedNameHash(self: Decl) Scope.NameHash {
+ return self.scope.fullyQualifiedNameHash(mem.spanZ(self.name));
+ }
+
+ pub fn typedValue(self: *Decl) error{AnalysisFail}!TypedValue {
+ const tvm = self.typedValueManaged() orelse return error.AnalysisFail;
+ return tvm.typed_value;
+ }
+
+ pub fn value(self: *Decl) error{AnalysisFail}!Value {
+ return (try self.typedValue()).val;
+ }
+
+ pub fn dump(self: *Decl) void {
+        const loc = std.zig.findLineColumn(self.scope.source.bytes, self.src());
+ std.debug.print("{}:{}:{} name={} status={}", .{
+ self.scope.sub_file_path,
+ loc.line + 1,
+ loc.column + 1,
+ mem.spanZ(self.name),
+ @tagName(self.analysis),
+ });
+ if (self.typedValueManaged()) |tvm| {
+ std.debug.print(" ty={} val={}", .{ tvm.typed_value.ty, tvm.typed_value.val });
+ }
+ std.debug.print("\n", .{});
+ }
+
+ pub fn typedValueManaged(self: *Decl) ?*TypedValue.Managed {
+ switch (self.typed_value) {
+ .most_recent => |*x| return x,
+ .never_succeeded => return null,
+ }
+ }
+
+ fn removeDependant(self: *Decl, other: *Decl) void {
+ self.dependants.removeAssertDiscard(other);
+ }
+
+ fn removeDependency(self: *Decl, other: *Decl) void {
+ self.dependencies.removeAssertDiscard(other);
+ }
+};
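+
+// A minimal sketch of the `generation` handshake documented above, with
+// illustrative bindings `mod: *Module` and `decl: *Decl`:
+//
+//     mod.generation += 1; // a new update begins
+//     // ... semantic analysis runs ...
+//     const up_to_date = decl.analysis == .complete and
+//         decl.generation == mod.generation;
+//     // When false, `complete` refers to a previous update and the Decl may
+//     // still be marked `outdated` through its dependencies.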
+
+/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
+pub const Fn = struct {
+    /// This memory is owned by the Decl's TypedValue.Managed arena allocator.
+ analysis: union(enum) {
+ queued: *ZIR,
+ in_progress,
+ /// There will be a corresponding ErrorMsg in Module.failed_decls
+ sema_failure,
+ /// This Fn might be OK but it depends on another Decl which did not successfully complete
+ /// semantic analysis.
+ dependency_failure,
+ success: Body,
+ },
+ owner_decl: *Decl,
+
+ /// This memory is temporary and points to stack memory for the duration
+ /// of Fn analysis.
+ pub const Analysis = struct {
+ inner_block: Scope.Block,
+ };
+
+ /// Contains un-analyzed ZIR instructions generated from Zig source AST.
+ pub const ZIR = struct {
+ body: zir.Module.Body,
+ arena: std.heap.ArenaAllocator.State,
+ };
+
+ /// For debugging purposes.
+ pub fn dump(self: *Fn, mod: Module) void {
+ std.debug.print("Module.Function(name={}) ", .{self.owner_decl.name});
+ switch (self.analysis) {
+ .queued => {
+ std.debug.print("queued\n", .{});
+ },
+ .in_progress => {
+ std.debug.print("in_progress\n", .{});
+ },
+ else => {
+ std.debug.print("\n", .{});
+ zir.dumpFn(mod, self);
+ },
+ }
+ }
+};
+
+pub const Var = struct {
+    /// If is_extern == true, this is undefined.
+ init: Value,
+ owner_decl: *Decl,
+
+ is_extern: bool,
+ is_mutable: bool,
+ is_threadlocal: bool,
+};
+
+pub const Scope = struct {
+ tag: Tag,
+
+ pub const NameHash = [16]u8;
+
+ pub fn cast(base: *Scope, comptime T: type) ?*T {
+ if (base.tag != T.base_tag)
+ return null;
+
+ return @fieldParentPtr(T, "base", base);
+ }
+
+    /// Returns the arena Allocator associated with this scope.
+    /// Asserts the scope is not a File or Container.
+ pub fn arena(self: *Scope) *Allocator {
+ switch (self.tag) {
+ .block => return self.cast(Block).?.arena,
+ .decl => return &self.cast(DeclAnalysis).?.arena.allocator,
+ .gen_zir => return self.cast(GenZIR).?.arena,
+ .local_val => return self.cast(LocalVal).?.gen_zir.arena,
+ .local_ptr => return self.cast(LocalPtr).?.gen_zir.arena,
+ .zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
+ .file => unreachable,
+ .container => unreachable,
+ }
+ }
+
+ /// If the scope has a parent which is a `DeclAnalysis`,
+ /// returns the `Decl`, otherwise returns `null`.
+ pub fn decl(self: *Scope) ?*Decl {
+ return switch (self.tag) {
+ .block => self.cast(Block).?.decl,
+ .gen_zir => self.cast(GenZIR).?.decl,
+ .local_val => self.cast(LocalVal).?.gen_zir.decl,
+ .local_ptr => self.cast(LocalPtr).?.gen_zir.decl,
+ .decl => self.cast(DeclAnalysis).?.decl,
+ .zir_module => null,
+ .file => null,
+ .container => null,
+ };
+ }
+
+    /// Returns the enclosing namespace scope, which is always either
+    /// a ZIRModule or a Container.
+ pub fn namespace(self: *Scope) *Scope {
+ switch (self.tag) {
+ .block => return self.cast(Block).?.decl.scope,
+ .gen_zir => return self.cast(GenZIR).?.decl.scope,
+ .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope,
+ .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope,
+ .decl => return self.cast(DeclAnalysis).?.decl.scope,
+ .file => return &self.cast(File).?.root_container.base,
+ .zir_module, .container => return self,
+ }
+ }
+
+ /// Must generate unique bytes with no collisions with other decls.
+ /// The point of hashing here is only to limit the number of bytes of
+ /// the unique identifier to a fixed size (16 bytes).
+ pub fn fullyQualifiedNameHash(self: *Scope, name: []const u8) NameHash {
+ switch (self.tag) {
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .decl => unreachable,
+ .file => unreachable,
+ .zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
+ .container => return self.cast(Container).?.fullyQualifiedNameHash(name),
+ }
+ }
+
+ /// Asserts the scope is a child of a File and has an AST tree and returns the tree.
+ pub fn tree(self: *Scope) *ast.Tree {
+ switch (self.tag) {
+ .file => return self.cast(File).?.contents.tree,
+ .zir_module => unreachable,
+ .decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
+ .container => return self.cast(Container).?.file_scope.contents.tree,
+ }
+ }
+
+ /// Asserts the scope is a child of a `GenZIR` and returns it.
+ pub fn getGenZIR(self: *Scope) *GenZIR {
+ return switch (self.tag) {
+ .block => unreachable,
+ .gen_zir => self.cast(GenZIR).?,
+ .local_val => return self.cast(LocalVal).?.gen_zir,
+ .local_ptr => return self.cast(LocalPtr).?.gen_zir,
+ .decl => unreachable,
+ .zir_module => unreachable,
+ .file => unreachable,
+ .container => unreachable,
+ };
+ }
+
+    /// Asserts the scope is a ZIRModule, Container or File and
+    /// returns the sub_file_path field.
+ pub fn subFilePath(base: *Scope) []const u8 {
+ switch (base.tag) {
+ .container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path,
+ .file => return @fieldParentPtr(File, "base", base).sub_file_path,
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ pub fn unload(base: *Scope, gpa: *Allocator) void {
+ switch (base.tag) {
+ .file => return @fieldParentPtr(File, "base", base).unload(gpa),
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(gpa),
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .decl => unreachable,
+ .container => unreachable,
+ }
+ }
+
+ pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 {
+ switch (base.tag) {
+ .container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module),
+ .file => return @fieldParentPtr(File, "base", base).getSource(module),
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .block => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ /// Asserts the scope is a namespace Scope and removes the Decl from the namespace.
+ pub fn removeDecl(base: *Scope, child: *Decl) void {
+ switch (base.tag) {
+ .container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
+ .zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
+ .file => unreachable,
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .decl => unreachable,
+ }
+ }
+
+ /// Asserts the scope is a File or ZIRModule and deinitializes it, then deallocates it.
+ pub fn destroy(base: *Scope, gpa: *Allocator) void {
+ switch (base.tag) {
+ .file => {
+ const scope_file = @fieldParentPtr(File, "base", base);
+ scope_file.deinit(gpa);
+ gpa.destroy(scope_file);
+ },
+ .zir_module => {
+ const scope_zir_module = @fieldParentPtr(ZIRModule, "base", base);
+ scope_zir_module.deinit(gpa);
+ gpa.destroy(scope_zir_module);
+ },
+ .block => unreachable,
+ .gen_zir => unreachable,
+ .local_val => unreachable,
+ .local_ptr => unreachable,
+ .decl => unreachable,
+ .container => unreachable,
+ }
+ }
+
+ fn name_hash_hash(x: NameHash) u32 {
+ return @truncate(u32, @bitCast(u128, x));
+ }
+
+ fn name_hash_eql(a: NameHash, b: NameHash) bool {
+ return @bitCast(u128, a) == @bitCast(u128, b);
+ }
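+
+    // A minimal sketch of how the two helpers above let `Module.decl_table`
+    // key directly on the 16-byte NameHash (bindings are illustrative):
+    //
+    //     const key: NameHash = decl.scope.fullyQualifiedNameHash("main");
+    //     // a lookup of `key` in `mod.decl_table` hashes and compares it
+    //     // via name_hash_hash/name_hash_eql above.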
+
+ pub const Tag = enum {
+ /// .zir source code.
+ zir_module,
+ /// .zig source code.
+ file,
+        /// struct, enum or union; every .file contains one of these.
+ container,
+ block,
+ decl,
+ gen_zir,
+ local_val,
+ local_ptr,
+ };
+
+ pub const Container = struct {
+ pub const base_tag: Tag = .container;
+ base: Scope = Scope{ .tag = base_tag },
+
+ file_scope: *Scope.File,
+
+ /// Direct children of the file.
+ decls: std.AutoArrayHashMapUnmanaged(*Decl, void),
+
+ // TODO implement container types and put this in a status union
+ // ty: Type
+
+ pub fn deinit(self: *Container, gpa: *Allocator) void {
+ self.decls.deinit(gpa);
+ self.* = undefined;
+ }
+
+ pub fn removeDecl(self: *Container, child: *Decl) void {
+ _ = self.decls.remove(child);
+ }
+
+ pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash {
+ // TODO container scope qualified names.
+ return std.zig.hashSrc(name);
+ }
+ };
+
+ pub const File = struct {
+ pub const base_tag: Tag = .file;
+ base: Scope = Scope{ .tag = base_tag },
+
+ /// Relative to the owning package's root_src_dir.
+ /// Reference to external memory, not owned by File.
+ sub_file_path: []const u8,
+ source: union(enum) {
+ unloaded: void,
+ bytes: [:0]const u8,
+ },
+ contents: union {
+ not_available: void,
+ tree: *ast.Tree,
+ },
+ status: enum {
+ never_loaded,
+ unloaded_success,
+ unloaded_parse_failure,
+ loaded_success,
+ },
+
+ root_container: Container,
+
+ pub fn unload(self: *File, gpa: *Allocator) void {
+ switch (self.status) {
+ .never_loaded,
+ .unloaded_parse_failure,
+ .unloaded_success,
+ => {},
+
+ .loaded_success => {
+ self.contents.tree.deinit();
+ self.status = .unloaded_success;
+ },
+ }
+ switch (self.source) {
+ .bytes => |bytes| {
+ gpa.free(bytes);
+ self.source = .{ .unloaded = {} };
+ },
+ .unloaded => {},
+ }
+ }
+
+ pub fn deinit(self: *File, gpa: *Allocator) void {
+ self.root_container.deinit(gpa);
+ self.unload(gpa);
+ self.* = undefined;
+ }
+
+ pub fn dumpSrc(self: *File, src: usize) void {
+ const loc = std.zig.findLineColumn(self.source.bytes, src);
+ std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
+ }
+
+ pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
+ switch (self.source) {
+ .unloaded => {
+ const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
+ module.gpa,
+ self.sub_file_path,
+ std.math.maxInt(u32),
+ null,
+ 1,
+ 0,
+ );
+ self.source = .{ .bytes = source };
+ return source;
+ },
+ .bytes => |bytes| return bytes,
+ }
+ }
+ };
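+
+    // `getSource` and `unload` form a lazy cache round-trip over the file's
+    // bytes; a minimal sketch with illustrative `file` and `mod` bindings:
+    //
+    //     const src1 = try file.getSource(mod); // first call reads from disk
+    //     const src2 = try file.getSource(mod); // cached: same slice as src1
+    //     file.unload(mod.gpa);                 // frees bytes (and tree, if loaded)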
+
+ pub const ZIRModule = struct {
+ pub const base_tag: Tag = .zir_module;
+ base: Scope = Scope{ .tag = base_tag },
+ /// Relative to the owning package's root_src_dir.
+ /// Reference to external memory, not owned by ZIRModule.
+ sub_file_path: []const u8,
+ source: union(enum) {
+ unloaded: void,
+ bytes: [:0]const u8,
+ },
+ contents: union {
+ not_available: void,
+ module: *zir.Module,
+ },
+ status: enum {
+ never_loaded,
+ unloaded_success,
+ unloaded_parse_failure,
+ unloaded_sema_failure,
+
+ loaded_sema_failure,
+ loaded_success,
+ },
+
+ /// Even though .zir files only have 1 module, this set is still needed
+ /// because of anonymous Decls, which can exist in the global set, but
+ /// not this one.
+ decls: ArrayListUnmanaged(*Decl),
+
+ pub fn unload(self: *ZIRModule, gpa: *Allocator) void {
+ switch (self.status) {
+ .never_loaded,
+ .unloaded_parse_failure,
+ .unloaded_sema_failure,
+ .unloaded_success,
+ => {},
+
+ .loaded_success => {
+ self.contents.module.deinit(gpa);
+ gpa.destroy(self.contents.module);
+ self.contents = .{ .not_available = {} };
+ self.status = .unloaded_success;
+ },
+ .loaded_sema_failure => {
+ self.contents.module.deinit(gpa);
+ gpa.destroy(self.contents.module);
+ self.contents = .{ .not_available = {} };
+ self.status = .unloaded_sema_failure;
+ },
+ }
+ switch (self.source) {
+ .bytes => |bytes| {
+ gpa.free(bytes);
+ self.source = .{ .unloaded = {} };
+ },
+ .unloaded => {},
+ }
+ }
+
+ pub fn deinit(self: *ZIRModule, gpa: *Allocator) void {
+ self.decls.deinit(gpa);
+ self.unload(gpa);
+ self.* = undefined;
+ }
+
+ pub fn removeDecl(self: *ZIRModule, child: *Decl) void {
+ for (self.decls.items) |item, i| {
+ if (item == child) {
+ _ = self.decls.swapRemove(i);
+ return;
+ }
+ }
+ }
+
+ pub fn dumpSrc(self: *ZIRModule, src: usize) void {
+ const loc = std.zig.findLineColumn(self.source.bytes, src);
+ std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
+ }
+
+ pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
+ switch (self.source) {
+ .unloaded => {
+ const source = try module.root_pkg.root_src_directory.handle.readFileAllocOptions(
+ module.gpa,
+ self.sub_file_path,
+ std.math.maxInt(u32),
+ null,
+ 1,
+ 0,
+ );
+ self.source = .{ .bytes = source };
+ return source;
+ },
+ .bytes => |bytes| return bytes,
+ }
+ }
+
+ pub fn fullyQualifiedNameHash(self: *ZIRModule, name: []const u8) NameHash {
+ // ZIR modules only have 1 file with all decls global in the same namespace.
+ return std.zig.hashSrc(name);
+ }
+ };
+
+    /// This is a temporary structure; references to it are valid only
+ /// during semantic analysis of the block.
+ pub const Block = struct {
+ pub const base_tag: Tag = .block;
+ base: Scope = Scope{ .tag = base_tag },
+ parent: ?*Block,
+ func: ?*Fn,
+ decl: *Decl,
+ instructions: ArrayListUnmanaged(*Inst),
+ /// Points to the arena allocator of DeclAnalysis
+ arena: *Allocator,
+ label: ?Label = null,
+ is_comptime: bool,
+
+ pub const Label = struct {
+ zir_block: *zir.Inst.Block,
+ results: ArrayListUnmanaged(*Inst),
+ block_inst: *Inst.Block,
+ };
+ };
+
+    /// This is a temporary structure; references to it are valid only
+ /// during semantic analysis of the decl.
+ pub const DeclAnalysis = struct {
+ pub const base_tag: Tag = .decl;
+ base: Scope = Scope{ .tag = base_tag },
+ decl: *Decl,
+ arena: std.heap.ArenaAllocator,
+ };
+
+    /// This is a temporary structure; references to it are valid only
+ /// during semantic analysis of the decl.
+ pub const GenZIR = struct {
+ pub const base_tag: Tag = .gen_zir;
+ base: Scope = Scope{ .tag = base_tag },
+ /// Parents can be: `GenZIR`, `ZIRModule`, `File`
+ parent: *Scope,
+ decl: *Decl,
+ arena: *Allocator,
+ /// The first N instructions in a function body ZIR are arg instructions.
+ instructions: std.ArrayListUnmanaged(*zir.Inst) = .{},
+ label: ?Label = null,
+
+ pub const Label = struct {
+ token: ast.TokenIndex,
+ block_inst: *zir.Inst.Block,
+ result_loc: astgen.ResultLoc,
+ };
+ };
+
+    /// This is always a `const` local and, importantly, the `inst` is a value, not a pointer.
+ /// This structure lives as long as the AST generation of the Block
+ /// node that contains the variable.
+ pub const LocalVal = struct {
+ pub const base_tag: Tag = .local_val;
+ base: Scope = Scope{ .tag = base_tag },
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZIR`.
+ parent: *Scope,
+ gen_zir: *GenZIR,
+ name: []const u8,
+ inst: *zir.Inst,
+ };
+
+ /// This could be a `const` or `var` local. It has a pointer instead of a value.
+ /// This structure lives as long as the AST generation of the Block
+ /// node that contains the variable.
+ pub const LocalPtr = struct {
+ pub const base_tag: Tag = .local_ptr;
+ base: Scope = Scope{ .tag = base_tag },
+ /// Parents can be: `LocalVal`, `LocalPtr`, `GenZIR`.
+ parent: *Scope,
+ gen_zir: *GenZIR,
+ name: []const u8,
+ ptr: *zir.Inst,
+ };
+};
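+
+// A sketch of the scope chain built during AST->ZIR generation for
+// `fn f(a: usize) void { const b = a; }`; each local wraps the previous
+// scope through its `parent` field:
+//
+//     GenZIR (function body)
+//       <- LocalVal "a" (the arg instruction)
+//          <- LocalVal "b" (the const binding)
+//
+// Identifier resolution walks `parent` pointers back toward the GenZIR.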
+
+pub const InnerError = error{ OutOfMemory, AnalysisFail };
+
+pub fn deinit(self: *Module) void {
+ const gpa = self.gpa;
+
+ self.zig_cache_artifact_directory.handle.close();
+
+ self.deletion_set.deinit(gpa);
+
+ for (self.decl_table.items()) |entry| {
+ entry.value.destroy(gpa);
+ }
+ self.decl_table.deinit(gpa);
+
+ for (self.failed_decls.items()) |entry| {
+ entry.value.destroy(gpa);
+ }
+ self.failed_decls.deinit(gpa);
+
+ for (self.failed_files.items()) |entry| {
+ entry.value.destroy(gpa);
+ }
+ self.failed_files.deinit(gpa);
+
+ for (self.failed_exports.items()) |entry| {
+ entry.value.destroy(gpa);
+ }
+ self.failed_exports.deinit(gpa);
+
+ for (self.decl_exports.items()) |entry| {
+ const export_list = entry.value;
+ gpa.free(export_list);
+ }
+ self.decl_exports.deinit(gpa);
+
+ for (self.export_owners.items()) |entry| {
+ freeExportList(gpa, entry.value);
+ }
+ self.export_owners.deinit(gpa);
+
+ self.symbol_exports.deinit(gpa);
+ self.root_scope.destroy(gpa);
+
+ var it = self.global_error_set.iterator();
+ while (it.next()) |entry| {
+ gpa.free(entry.key);
+ }
+ self.global_error_set.deinit(gpa);
+}
+
+fn freeExportList(gpa: *Allocator, export_list: []*Export) void {
+ for (export_list) |exp| {
+ gpa.free(exp.options.name);
+ gpa.destroy(exp);
+ }
+ gpa.free(export_list);
+}
+
+pub fn ensureDeclAnalyzed(self: *Module, decl: *Decl) InnerError!void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const subsequent_analysis = switch (decl.analysis) {
+ .in_progress => unreachable,
+
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .dependency_failure,
+ .codegen_failure_retryable,
+ => return error.AnalysisFail,
+
+ .complete => return,
+
+ .outdated => blk: {
+ log.debug("re-analyzing {}\n", .{decl.name});
+
+ // The exports this Decl performs will be re-discovered, so we remove them here
+ // prior to re-analysis.
+ self.deleteDeclExports(decl);
+ // Dependencies will be re-discovered, so we remove them here prior to re-analysis.
+ for (decl.dependencies.items()) |entry| {
+ const dep = entry.key;
+ dep.removeDependant(decl);
+ if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
+ // We don't perform a deletion here, because this Decl or another one
+ // may end up referencing it before the update is complete.
+ dep.deletion_flag = true;
+ try self.deletion_set.append(self.gpa, dep);
+ }
+ }
+ decl.dependencies.clearRetainingCapacity();
+
+ break :blk true;
+ },
+
+ .unreferenced => false,
+ };
+
+ const type_changed = if (self.root_scope.cast(Scope.ZIRModule)) |zir_module|
+ try zir_sema.analyzeZirDecl(self, decl, zir_module.contents.module.decls[decl.src_index])
+ else
+ self.astGenAndAnalyzeDecl(decl) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ error.AnalysisFail => return error.AnalysisFail,
+ else => {
+ try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
+ self.failed_decls.putAssumeCapacityNoClobber(decl, try Compilation.ErrorMsg.create(
+ self.gpa,
+ decl.src(),
+ "unable to analyze: {}",
+ .{@errorName(err)},
+ ));
+ decl.analysis = .sema_failure_retryable;
+ return error.AnalysisFail;
+ },
+ };
+
+ if (subsequent_analysis) {
+ // We may need to chase the dependants and re-analyze them.
+ // However, if the decl is a function, and the type is the same, we do not need to.
+ if (type_changed or decl.typed_value.most_recent.typed_value.val.tag() != .function) {
+ for (decl.dependants.items()) |entry| {
+ const dep = entry.key;
+ switch (dep.analysis) {
+ .unreferenced => unreachable,
+ .in_progress => unreachable,
+ .outdated => continue, // already queued for update
+
+ .dependency_failure,
+ .sema_failure,
+ .sema_failure_retryable,
+ .codegen_failure,
+ .codegen_failure_retryable,
+ .complete,
+ => if (dep.generation != self.generation) {
+ try self.markOutdatedDecl(dep);
+ },
+ }
+ }
+ }
+ }
+}
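+
+// Why `type_changed` gates the dependant sweep above, as a minimal example:
+//
+//     fn foo() u32 { return 1; }     // change the body to `return 2;`
+//     fn bar() u32 { return foo(); } // `bar` only depends on the type
+//                                    // `fn() u32`, which is unchanged, so
+//                                    // `bar`'s Decl is not marked outdated.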
+
+fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const container_scope = decl.scope.cast(Scope.Container).?;
+ const tree = try self.getAstTree(container_scope);
+ const ast_node = tree.root_node.decls()[decl.src_index];
+ switch (ast_node.tag) {
+ .FnProto => {
+ const fn_proto = @fieldParentPtr(ast.Node.FnProto, "base", ast_node);
+
+ decl.analysis = .in_progress;
+
+ // This arena allocator's memory is discarded at the end of this function. It is used
+ // to determine the type of the function, and hence the type of the decl, which is needed
+ // to complete the Decl analysis.
+ var fn_type_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
+ defer fn_type_scope_arena.deinit();
+ var fn_type_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = &fn_type_scope_arena.allocator,
+ .parent = decl.scope,
+ };
+ defer fn_type_scope.instructions.deinit(self.gpa);
+
+ decl.is_pub = fn_proto.getVisibToken() != null;
+ const body_node = fn_proto.getBodyNode() orelse
+ return self.failTok(&fn_type_scope.base, fn_proto.fn_token, "TODO implement extern functions", .{});
+
+ const param_decls = fn_proto.params();
+ const param_types = try fn_type_scope.arena.alloc(*zir.Inst, param_decls.len);
+
+ const fn_src = tree.token_locs[fn_proto.fn_token].start;
+ const type_type = try astgen.addZIRInstConst(self, &fn_type_scope.base, fn_src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.type_type),
+ });
+ const type_type_rl: astgen.ResultLoc = .{ .ty = type_type };
+ for (param_decls) |param_decl, i| {
+ const param_type_node = switch (param_decl.param_type) {
+ .any_type => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement anytype parameter", .{}),
+ .type_expr => |node| node,
+ };
+ param_types[i] = try astgen.expr(self, &fn_type_scope.base, type_type_rl, param_type_node);
+ }
+ if (fn_proto.getVarArgsToken()) |var_args_token| {
+ return self.failTok(&fn_type_scope.base, var_args_token, "TODO implement var args", .{});
+ }
+ if (fn_proto.getLibName()) |lib_name| {
+ return self.failNode(&fn_type_scope.base, lib_name, "TODO implement function library name", .{});
+ }
+ if (fn_proto.getAlignExpr()) |align_expr| {
+ return self.failNode(&fn_type_scope.base, align_expr, "TODO implement function align expression", .{});
+ }
+ if (fn_proto.getSectionExpr()) |sect_expr| {
+ return self.failNode(&fn_type_scope.base, sect_expr, "TODO implement function section expression", .{});
+ }
+ if (fn_proto.getCallconvExpr()) |callconv_expr| {
+ return self.failNode(
+ &fn_type_scope.base,
+ callconv_expr,
+ "TODO implement function calling convention expression",
+ .{},
+ );
+ }
+ const return_type_expr = switch (fn_proto.return_type) {
+ .Explicit => |node| node,
+ .InferErrorSet => |node| return self.failNode(&fn_type_scope.base, node, "TODO implement inferred error sets", .{}),
+ .Invalid => |tok| return self.failTok(&fn_type_scope.base, tok, "unable to parse return type", .{}),
+ };
+
+ const return_type_inst = try astgen.expr(self, &fn_type_scope.base, type_type_rl, return_type_expr);
+ const fn_type_inst = try astgen.addZIRInst(self, &fn_type_scope.base, fn_src, zir.Inst.FnType, .{
+ .return_type = return_type_inst,
+ .param_types = param_types,
+ }, .{});
+
+ // We need the memory for the Type to go into the arena for the Decl
+ var decl_arena = std.heap.ArenaAllocator.init(self.gpa);
+ errdefer decl_arena.deinit();
+ const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+
+ var block_scope: Scope.Block = .{
+ .parent = null,
+ .func = null,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &decl_arena.allocator,
+ .is_comptime = false,
+ };
+ defer block_scope.instructions.deinit(self.gpa);
+
+ const fn_type = try zir_sema.analyzeBodyValueAsType(self, &block_scope, fn_type_inst, .{
+ .instructions = fn_type_scope.instructions.items,
+ });
+ const new_func = try decl_arena.allocator.create(Fn);
+ const fn_payload = try decl_arena.allocator.create(Value.Payload.Function);
+
+ const fn_zir = blk: {
+ // This scope's arena memory is discarded after the ZIR generation
+ // pass completes, and semantic analysis of it completes.
+ var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
+ errdefer gen_scope_arena.deinit();
+ var gen_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = &gen_scope_arena.allocator,
+ .parent = decl.scope,
+ };
+ defer gen_scope.instructions.deinit(self.gpa);
+
+ // We need an instruction for each parameter, and they must be first in the body.
+ try gen_scope.instructions.resize(self.gpa, fn_proto.params_len);
+ var params_scope = &gen_scope.base;
+ for (fn_proto.params()) |param, i| {
+ const name_token = param.name_token.?;
+ const src = tree.token_locs[name_token].start;
+ const param_name = tree.tokenSlice(name_token); // TODO: call identifierTokenString
+ const arg = try gen_scope_arena.allocator.create(zir.Inst.Arg);
+ arg.* = .{
+ .base = .{
+ .tag = .arg,
+ .src = src,
+ },
+ .positionals = .{
+ .name = param_name,
+ },
+ .kw_args = .{},
+ };
+ gen_scope.instructions.items[i] = &arg.base;
+ const sub_scope = try gen_scope_arena.allocator.create(Scope.LocalVal);
+ sub_scope.* = .{
+ .parent = params_scope,
+ .gen_zir = &gen_scope,
+ .name = param_name,
+ .inst = &arg.base,
+ };
+ params_scope = &sub_scope.base;
+ }
+
+ const body_block = body_node.cast(ast.Node.Block).?;
+
+ try astgen.blockExpr(self, params_scope, body_block);
+
+ if (gen_scope.instructions.items.len == 0 or
+ !gen_scope.instructions.items[gen_scope.instructions.items.len - 1].tag.isNoReturn())
+ {
+ const src = tree.token_locs[body_block.rbrace].start;
+ _ = try astgen.addZIRNoOp(self, &gen_scope.base, src, .returnvoid);
+ }
+
+ const fn_zir = try gen_scope_arena.allocator.create(Fn.ZIR);
+ fn_zir.* = .{
+ .body = .{
+ .instructions = try gen_scope.arena.dupe(*zir.Inst, gen_scope.instructions.items),
+ },
+ .arena = gen_scope_arena.state,
+ };
+ break :blk fn_zir;
+ };
+
+ new_func.* = .{
+ .analysis = .{ .queued = fn_zir },
+ .owner_decl = decl,
+ };
+ fn_payload.* = .{ .func = new_func };
+
+ var prev_type_has_bits = false;
+ var type_changed = true;
+
+ if (decl.typedValueManaged()) |tvm| {
+ prev_type_has_bits = tvm.typed_value.ty.hasCodeGenBits();
+ type_changed = !tvm.typed_value.ty.eql(fn_type);
+
+ tvm.deinit(self.gpa);
+ }
+
+ decl_arena_state.* = decl_arena.state;
+ decl.typed_value = .{
+ .most_recent = .{
+ .typed_value = .{
+ .ty = fn_type,
+ .val = Value.initPayload(&fn_payload.base),
+ },
+ .arena = decl_arena_state,
+ },
+ };
+ decl.analysis = .complete;
+ decl.generation = self.generation;
+
+ if (fn_type.hasCodeGenBits()) {
+ // We don't fully codegen the decl until later, but we do need to reserve a global
+ // offset table index for it. This allows us to codegen decls out of dependency order,
+ // increasing how many computations can be done in parallel.
+ try self.comp.bin_file.allocateDeclIndexes(decl);
+ try self.comp.work_queue.writeItem(.{ .codegen_decl = decl });
+ } else if (prev_type_has_bits) {
+ self.comp.bin_file.freeDecl(decl);
+ }
+
+ if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
+ if (tree.token_ids[maybe_export_token] == .Keyword_export) {
+ const export_src = tree.token_locs[maybe_export_token].start;
+ const name_loc = tree.token_locs[fn_proto.getNameToken().?];
+ const name = tree.tokenSliceLoc(name_loc);
+ // The scope needs to have the decl in it.
+ try self.analyzeExport(&block_scope.base, export_src, name, decl);
+ }
+ }
+ return type_changed;
+ },
+ .VarDecl => {
+ const var_decl = @fieldParentPtr(ast.Node.VarDecl, "base", ast_node);
+
+ decl.analysis = .in_progress;
+
+ // We need the memory for the Type to go into the arena for the Decl
+ var decl_arena = std.heap.ArenaAllocator.init(self.gpa);
+ errdefer decl_arena.deinit();
+ const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+
+ var block_scope: Scope.Block = .{
+ .parent = null,
+ .func = null,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &decl_arena.allocator,
+ .is_comptime = true,
+ };
+ defer block_scope.instructions.deinit(self.gpa);
+
+ decl.is_pub = var_decl.getVisibToken() != null;
+ const is_extern = blk: {
+ const maybe_extern_token = var_decl.getExternExportToken() orelse
+ break :blk false;
+ if (tree.token_ids[maybe_extern_token] != .Keyword_extern) break :blk false;
+ if (var_decl.getInitNode()) |some| {
+ return self.failNode(&block_scope.base, some, "extern variables have no initializers", .{});
+ }
+ break :blk true;
+ };
+ if (var_decl.getLibName()) |lib_name| {
+ assert(is_extern);
+ return self.failNode(&block_scope.base, lib_name, "TODO implement function library name", .{});
+ }
+ const is_mutable = tree.token_ids[var_decl.mut_token] == .Keyword_var;
+ const is_threadlocal = if (var_decl.getThreadLocalToken()) |some| blk: {
+ if (!is_mutable) {
+ return self.failTok(&block_scope.base, some, "threadlocal variable cannot be constant", .{});
+ }
+ break :blk true;
+ } else false;
+ assert(var_decl.getComptimeToken() == null);
+ if (var_decl.getAlignNode()) |align_expr| {
+ return self.failNode(&block_scope.base, align_expr, "TODO implement function align expression", .{});
+ }
+ if (var_decl.getSectionNode()) |sect_expr| {
+ return self.failNode(&block_scope.base, sect_expr, "TODO implement function section expression", .{});
+ }
+
+ const var_info: struct { ty: Type, val: ?Value } = if (var_decl.getInitNode()) |init_node| vi: {
+ var gen_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
+ defer gen_scope_arena.deinit();
+ var gen_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = &gen_scope_arena.allocator,
+ .parent = decl.scope,
+ };
+ defer gen_scope.instructions.deinit(self.gpa);
+
+ const init_result_loc: astgen.ResultLoc = if (var_decl.getTypeNode()) |type_node| rl: {
+ const src = tree.token_locs[type_node.firstToken()].start;
+ const type_type = try astgen.addZIRInstConst(self, &gen_scope.base, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.type_type),
+ });
+ const var_type = try astgen.expr(self, &gen_scope.base, .{ .ty = type_type }, type_node);
+ break :rl .{ .ty = var_type };
+ } else .none;
+
+ const src = tree.token_locs[init_node.firstToken()].start;
+ const init_inst = try astgen.expr(self, &gen_scope.base, init_result_loc, init_node);
+
+ var inner_block: Scope.Block = .{
+ .parent = null,
+ .func = null,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &gen_scope_arena.allocator,
+ .is_comptime = true,
+ };
+ defer inner_block.instructions.deinit(self.gpa);
+ try zir_sema.analyzeBody(self, &inner_block.base, .{ .instructions = gen_scope.instructions.items });
+
+ // The result location guarantees the type coercion.
+ const analyzed_init_inst = init_inst.analyzed_inst.?;
+ // The is_comptime in the Scope.Block guarantees the result is comptime-known.
+ const val = analyzed_init_inst.value().?;
+
+ const ty = try analyzed_init_inst.ty.copy(block_scope.arena);
+ break :vi .{
+ .ty = ty,
+ .val = try val.copy(block_scope.arena),
+ };
+ } else if (!is_extern) {
+ return self.failTok(&block_scope.base, var_decl.firstToken(), "variables must be initialized", .{});
+ } else if (var_decl.getTypeNode()) |type_node| vi: {
+ // Temporary arena for the zir instructions.
+ var type_scope_arena = std.heap.ArenaAllocator.init(self.gpa);
+ defer type_scope_arena.deinit();
+ var type_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = &type_scope_arena.allocator,
+ .parent = decl.scope,
+ };
+ defer type_scope.instructions.deinit(self.gpa);
+
+ const src = tree.token_locs[type_node.firstToken()].start;
+ const type_type = try astgen.addZIRInstConst(self, &type_scope.base, src, .{
+ .ty = Type.initTag(.type),
+ .val = Value.initTag(.type_type),
+ });
+ const var_type = try astgen.expr(self, &type_scope.base, .{ .ty = type_type }, type_node);
+ const ty = try zir_sema.analyzeBodyValueAsType(self, &block_scope, var_type, .{
+ .instructions = type_scope.instructions.items,
+ });
+ break :vi .{
+ .ty = ty,
+ .val = null,
+ };
+ } else {
+ return self.failTok(&block_scope.base, var_decl.firstToken(), "unable to infer variable type", .{});
+ };
+
+ if (is_mutable and !var_info.ty.isValidVarType(is_extern)) {
+ return self.failTok(&block_scope.base, var_decl.firstToken(), "variable of type '{}' must be const", .{var_info.ty});
+ }
+
+ var type_changed = true;
+ if (decl.typedValueManaged()) |tvm| {
+ type_changed = !tvm.typed_value.ty.eql(var_info.ty);
+
+ tvm.deinit(self.gpa);
+ }
+
+ const new_variable = try decl_arena.allocator.create(Var);
+ const var_payload = try decl_arena.allocator.create(Value.Payload.Variable);
+ new_variable.* = .{
+ .owner_decl = decl,
+ .init = var_info.val orelse undefined,
+ .is_extern = is_extern,
+ .is_mutable = is_mutable,
+ .is_threadlocal = is_threadlocal,
+ };
+ var_payload.* = .{ .variable = new_variable };
+
+ decl_arena_state.* = decl_arena.state;
+ decl.typed_value = .{
+ .most_recent = .{
+ .typed_value = .{
+ .ty = var_info.ty,
+ .val = Value.initPayload(&var_payload.base),
+ },
+ .arena = decl_arena_state,
+ },
+ };
+ decl.analysis = .complete;
+ decl.generation = self.generation;
+
+ if (var_decl.getExternExportToken()) |maybe_export_token| {
+ if (tree.token_ids[maybe_export_token] == .Keyword_export) {
+ const export_src = tree.token_locs[maybe_export_token].start;
+ const name_loc = tree.token_locs[var_decl.name_token];
+ const name = tree.tokenSliceLoc(name_loc);
+ // The scope needs to have the decl in it.
+ try self.analyzeExport(&block_scope.base, export_src, name, decl);
+ }
+ }
+ return type_changed;
+ },
+ .Comptime => {
+ const comptime_decl = @fieldParentPtr(ast.Node.Comptime, "base", ast_node);
+
+ decl.analysis = .in_progress;
+
+            // A comptime decl does not store any value, so we can deinit this arena once analysis is done.
+ var analysis_arena = std.heap.ArenaAllocator.init(self.gpa);
+ defer analysis_arena.deinit();
+ var gen_scope: Scope.GenZIR = .{
+ .decl = decl,
+ .arena = &analysis_arena.allocator,
+ .parent = decl.scope,
+ };
+ defer gen_scope.instructions.deinit(self.gpa);
+
+ _ = try astgen.comptimeExpr(self, &gen_scope.base, .none, comptime_decl.expr);
+
+ var block_scope: Scope.Block = .{
+ .parent = null,
+ .func = null,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &analysis_arena.allocator,
+ .is_comptime = true,
+ };
+ defer block_scope.instructions.deinit(self.gpa);
+
+ _ = try zir_sema.analyzeBody(self, &block_scope.base, .{
+ .instructions = gen_scope.instructions.items,
+ });
+
+ decl.analysis = .complete;
+ decl.generation = self.generation;
+ return true;
+ },
+ .Use => @panic("TODO usingnamespace decl"),
+ else => unreachable,
+ }
+}
+
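+/// Records a two-way dependency edge: `depender` gains `dependee` as a dependency and
+/// `dependee` gains `depender` as a dependant. Capacity for both tables is reserved up
+/// front so that neither insert below can fail partway through.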
+fn declareDeclDependency(self: *Module, depender: *Decl, dependee: *Decl) !void {
+ try depender.dependencies.ensureCapacity(self.gpa, depender.dependencies.items().len + 1);
+ try dependee.dependants.ensureCapacity(self.gpa, dependee.dependants.items().len + 1);
+
+ depender.dependencies.putAssumeCapacity(dependee, {});
+ dependee.dependants.putAssumeCapacity(depender, {});
+}
+
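+/// Returns the parsed ZIR module for `root_scope`, loading and parsing the source on
+/// first use and caching the result in the scope. A parse error is recorded in
+/// `failed_files` and reported as `error.AnalysisFail`.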
+fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
+ switch (root_scope.status) {
+ .never_loaded, .unloaded_success => {
+ try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
+
+ const source = try root_scope.getSource(self);
+
+ var keep_zir_module = false;
+ const zir_module = try self.gpa.create(zir.Module);
+ defer if (!keep_zir_module) self.gpa.destroy(zir_module);
+
+ zir_module.* = try zir.parse(self.gpa, source);
+ defer if (!keep_zir_module) zir_module.deinit(self.gpa);
+
+ if (zir_module.error_msg) |src_err_msg| {
+ self.failed_files.putAssumeCapacityNoClobber(
+ &root_scope.base,
+ try Compilation.ErrorMsg.create(self.gpa, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
+ );
+ root_scope.status = .unloaded_parse_failure;
+ return error.AnalysisFail;
+ }
+
+ root_scope.status = .loaded_success;
+ root_scope.contents = .{ .module = zir_module };
+ keep_zir_module = true;
+
+ return zir_module;
+ },
+
+ .unloaded_parse_failure,
+ .unloaded_sema_failure,
+ => return error.AnalysisFail,
+
+ .loaded_success, .loaded_sema_failure => return root_scope.contents.module,
+ }
+}
+
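+/// Returns the AST for the file backing `container_scope`, parsing it on first use and
+/// caching the tree in the file scope. Only the first parse error is reported.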
+fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ const root_scope = container_scope.file_scope;
+
+ switch (root_scope.status) {
+ .never_loaded, .unloaded_success => {
+ try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
+
+ const source = try root_scope.getSource(self);
+
+ var keep_tree = false;
+ const tree = try std.zig.parse(self.gpa, source);
+ defer if (!keep_tree) tree.deinit();
+
+ if (tree.errors.len != 0) {
+ const parse_err = tree.errors[0];
+
+ var msg = std.ArrayList(u8).init(self.gpa);
+ defer msg.deinit();
+
+ try parse_err.render(tree.token_ids, msg.outStream());
+ const err_msg = try self.gpa.create(Compilation.ErrorMsg);
+ err_msg.* = .{
+ .msg = msg.toOwnedSlice(),
+ .byte_offset = tree.token_locs[parse_err.loc()].start,
+ };
+
+ self.failed_files.putAssumeCapacityNoClobber(&root_scope.base, err_msg);
+ root_scope.status = .unloaded_parse_failure;
+ return error.AnalysisFail;
+ }
+
+ root_scope.status = .loaded_success;
+ root_scope.contents = .{ .tree = tree };
+ keep_tree = true;
+
+ return tree;
+ },
+
+ .unloaded_parse_failure => return error.AnalysisFail,
+
+ .loaded_success => return root_scope.contents.tree,
+ }
+}
+
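+/// Walks the top-level decls of a container, creating a `Decl` for each new one,
+/// detecting redefinitions, marking changed decls outdated, and deleting decls that
+/// no longer appear in the source.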
+pub fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ // We may be analyzing it for the first time, or this may be
+ // an incremental update. This code handles both cases.
+ const tree = try self.getAstTree(container_scope);
+ const decls = tree.root_node.decls();
+
+ try self.comp.work_queue.ensureUnusedCapacity(decls.len);
+ try container_scope.decls.ensureCapacity(self.gpa, decls.len);
+
+ // Keep track of the decls that we expect to see in this file so that
+ // we know which ones have been deleted.
+ var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
+ defer deleted_decls.deinit();
+ try deleted_decls.ensureCapacity(container_scope.decls.items().len);
+ for (container_scope.decls.items()) |entry| {
+ deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
+ }
+
+ for (decls) |src_decl, decl_i| {
+ if (src_decl.cast(ast.Node.FnProto)) |fn_proto| {
+ // We will create a Decl for it regardless of analysis status.
+ const name_tok = fn_proto.getNameToken() orelse {
+ @panic("TODO missing function name");
+ };
+
+ const name_loc = tree.token_locs[name_tok];
+ const name = tree.tokenSliceLoc(name_loc);
+ const name_hash = container_scope.fullyQualifiedNameHash(name);
+ const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
+ if (self.decl_table.get(name_hash)) |decl| {
+                // Update the AST Node index of the decl even if its contents are
+                // unchanged; it may have been re-ordered.
+ decl.src_index = decl_i;
+ if (deleted_decls.remove(decl) == null) {
+ decl.analysis = .sema_failure;
+ const err_msg = try Compilation.ErrorMsg.create(self.gpa, tree.token_locs[name_tok].start, "redefinition of '{}'", .{decl.name});
+ errdefer err_msg.destroy(self.gpa);
+ try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
+ } else {
+ if (!srcHashEql(decl.contents_hash, contents_hash)) {
+ try self.markOutdatedDecl(decl);
+ decl.contents_hash = contents_hash;
+ } else switch (self.comp.bin_file.tag) {
+ .coff => {
+ // TODO Implement for COFF
+ },
+ .elf => if (decl.fn_link.elf.len != 0) {
+ // TODO Look into detecting when this would be unnecessary by storing enough state
+ // in `Decl` to notice that the line number did not change.
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .update_line_number = decl });
+ },
+ .macho => {
+ // TODO Implement for MachO
+ },
+ .c, .wasm => {},
+ }
+ }
+ } else {
+ const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
+ container_scope.decls.putAssumeCapacity(new_decl, {});
+ if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
+ if (tree.token_ids[maybe_export_token] == .Keyword_export) {
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ }
+ }
+ }
+ } else if (src_decl.castTag(.VarDecl)) |var_decl| {
+ const name_loc = tree.token_locs[var_decl.name_token];
+ const name = tree.tokenSliceLoc(name_loc);
+ const name_hash = container_scope.fullyQualifiedNameHash(name);
+ const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
+ if (self.decl_table.get(name_hash)) |decl| {
+                // Update the AST Node index of the decl even if its contents are
+                // unchanged; it may have been re-ordered.
+ decl.src_index = decl_i;
+ if (deleted_decls.remove(decl) == null) {
+ decl.analysis = .sema_failure;
+ const err_msg = try Compilation.ErrorMsg.create(self.gpa, name_loc.start, "redefinition of '{}'", .{decl.name});
+ errdefer err_msg.destroy(self.gpa);
+ try self.failed_decls.putNoClobber(self.gpa, decl, err_msg);
+ } else if (!srcHashEql(decl.contents_hash, contents_hash)) {
+ try self.markOutdatedDecl(decl);
+ decl.contents_hash = contents_hash;
+ }
+ } else {
+ const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
+ container_scope.decls.putAssumeCapacity(new_decl, {});
+ if (var_decl.getExternExportToken()) |maybe_export_token| {
+ if (tree.token_ids[maybe_export_token] == .Keyword_export) {
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ }
+ }
+ }
+ } else if (src_decl.castTag(.Comptime)) |comptime_node| {
+ const name_index = self.getNextAnonNameIndex();
+ const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index});
+ defer self.gpa.free(name);
+
+ const name_hash = container_scope.fullyQualifiedNameHash(name);
+ const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
+
+ const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
+ container_scope.decls.putAssumeCapacity(new_decl, {});
+ self.comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
+ } else if (src_decl.castTag(.ContainerField)) |container_field| {
+ log.err("TODO: analyze container field", .{});
+ } else if (src_decl.castTag(.TestDecl)) |test_decl| {
+ log.err("TODO: analyze test decl", .{});
+ } else if (src_decl.castTag(.Use)) |use_decl| {
+ log.err("TODO: analyze usingnamespace decl", .{});
+ } else {
+ unreachable;
+ }
+ }
+    // Handle decls that were explicitly deleted from the source code. Not to be confused
+    // with deleting decls because they are no longer referenced.
+ for (deleted_decls.items()) |entry| {
+ log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
+ try self.deleteDecl(entry.key);
+ }
+}
+
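+/// ZIR analogue of `analyzeContainer`: reconciles the decls of a textual ZIR module
+/// with the decl table, then eagerly resolves any decls that carry an `export`
+/// instruction.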
+pub fn analyzeRootZIRModule(self: *Module, root_scope: *Scope.ZIRModule) !void {
+ // We may be analyzing it for the first time, or this may be
+ // an incremental update. This code handles both cases.
+ const src_module = try self.getSrcModule(root_scope);
+
+ try self.comp.work_queue.ensureUnusedCapacity(src_module.decls.len);
+ try root_scope.decls.ensureCapacity(self.gpa, src_module.decls.len);
+
+ var exports_to_resolve = std.ArrayList(*zir.Decl).init(self.gpa);
+ defer exports_to_resolve.deinit();
+
+ // Keep track of the decls that we expect to see in this file so that
+ // we know which ones have been deleted.
+ var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
+ defer deleted_decls.deinit();
+ try deleted_decls.ensureCapacity(self.decl_table.items().len);
+ for (self.decl_table.items()) |entry| {
+ deleted_decls.putAssumeCapacityNoClobber(entry.value, {});
+ }
+
+ for (src_module.decls) |src_decl, decl_i| {
+ const name_hash = root_scope.fullyQualifiedNameHash(src_decl.name);
+ if (self.decl_table.get(name_hash)) |decl| {
+ deleted_decls.removeAssertDiscard(decl);
+ if (!srcHashEql(src_decl.contents_hash, decl.contents_hash)) {
+ try self.markOutdatedDecl(decl);
+ decl.contents_hash = src_decl.contents_hash;
+ }
+ } else {
+ const new_decl = try self.createNewDecl(
+ &root_scope.base,
+ src_decl.name,
+ decl_i,
+ name_hash,
+ src_decl.contents_hash,
+ );
+ root_scope.decls.appendAssumeCapacity(new_decl);
+ if (src_decl.inst.cast(zir.Inst.Export)) |export_inst| {
+ try exports_to_resolve.append(src_decl);
+ }
+ }
+ }
+ for (exports_to_resolve.items) |export_decl| {
+ _ = try zir_sema.resolveZirDecl(self, &root_scope.base, export_decl);
+ }
+    // Handle decls that were explicitly deleted from the source code. Not to be confused
+    // with deleting decls because they are no longer referenced.
+ for (deleted_decls.items()) |entry| {
+ log.debug("noticed '{}' deleted from source\n", .{entry.key.name});
+ try self.deleteDecl(entry.key);
+ }
+}
+
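+/// Removes `decl` from every module data structure, queues dependencies that lost
+/// their last dependant for deletion, marks dependants outdated, and destroys the
+/// decl along with its exports and linker state.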
+pub fn deleteDecl(self: *Module, decl: *Decl) !void {
+ try self.deletion_set.ensureCapacity(self.gpa, self.deletion_set.items.len + decl.dependencies.items().len);
+
+ // Remove from the namespace it resides in. In the case of an anonymous Decl it will
+ // not be present in the set, and this does nothing.
+ decl.scope.removeDecl(decl);
+
+ log.debug("deleting decl '{}'\n", .{decl.name});
+ const name_hash = decl.fullyQualifiedNameHash();
+ self.decl_table.removeAssertDiscard(name_hash);
+    // Remove this decl from the dependant sets of its dependencies, because we are about to
+    // destroy the decl pointer.
+ for (decl.dependencies.items()) |entry| {
+ const dep = entry.key;
+ dep.removeDependant(decl);
+ if (dep.dependants.items().len == 0 and !dep.deletion_flag) {
+ // We don't recursively perform a deletion here, because during the update,
+ // another reference to it may turn up.
+ dep.deletion_flag = true;
+ self.deletion_set.appendAssumeCapacity(dep);
+ }
+ }
+ // Anything that depends on this deleted decl certainly needs to be re-analyzed.
+ for (decl.dependants.items()) |entry| {
+ const dep = entry.key;
+ dep.removeDependency(decl);
+ if (dep.analysis != .outdated) {
+ // TODO Move this failure possibility to the top of the function.
+ try self.markOutdatedDecl(dep);
+ }
+ }
+ if (self.failed_decls.remove(decl)) |entry| {
+ entry.value.destroy(self.gpa);
+ }
+ self.deleteDeclExports(decl);
+ self.comp.bin_file.freeDecl(decl);
+ decl.destroy(self.gpa);
+}
+
+/// Delete all the Export objects that are caused by this Decl. Re-analysis of
+/// this Decl will cause them to be re-created (or not).
+fn deleteDeclExports(self: *Module, decl: *Decl) void {
+ const kv = self.export_owners.remove(decl) orelse return;
+
+ for (kv.value) |exp| {
+ if (self.decl_exports.getEntry(exp.exported_decl)) |decl_exports_kv| {
+ // Remove exports with owner_decl matching the regenerating decl.
+ const list = decl_exports_kv.value;
+ var i: usize = 0;
+ var new_len = list.len;
+ while (i < new_len) {
+ if (list[i].owner_decl == decl) {
+                    mem.copy(*Export, list[i..], list[i + 1 .. new_len]);
+ new_len -= 1;
+ } else {
+ i += 1;
+ }
+ }
+ decl_exports_kv.value = self.gpa.shrink(list, new_len);
+ if (new_len == 0) {
+ self.decl_exports.removeAssertDiscard(exp.exported_decl);
+ }
+ }
+ if (self.comp.bin_file.cast(link.File.Elf)) |elf| {
+ elf.deleteExport(exp.link);
+ }
+ if (self.failed_exports.remove(exp)) |entry| {
+ entry.value.destroy(self.gpa);
+ }
+ _ = self.symbol_exports.remove(exp.options.name);
+ self.gpa.free(exp.options.name);
+ self.gpa.destroy(exp);
+ }
+ self.gpa.free(kv.value);
+}
+
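+/// Runs semantic analysis over a function body, transitioning `func.analysis` from
+/// `queued` through `in_progress` to `success`. The resulting instructions are
+/// allocated in the owning Decl's arena.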
+pub fn analyzeFnBody(self: *Module, decl: *Decl, func: *Fn) !void {
+ const tracy = trace(@src());
+ defer tracy.end();
+
+ // Use the Decl's arena for function memory.
+ var arena = decl.typed_value.most_recent.arena.?.promote(self.gpa);
+ defer decl.typed_value.most_recent.arena.?.* = arena.state;
+ var inner_block: Scope.Block = .{
+ .parent = null,
+ .func = func,
+ .decl = decl,
+ .instructions = .{},
+ .arena = &arena.allocator,
+ .is_comptime = false,
+ };
+ defer inner_block.instructions.deinit(self.gpa);
+
+ const fn_zir = func.analysis.queued;
+ defer fn_zir.arena.promote(self.gpa).deinit();
+ func.analysis = .{ .in_progress = {} };
+ log.debug("set {} to in_progress\n", .{decl.name});
+
+ try zir_sema.analyzeBody(self, &inner_block.base, fn_zir.body);
+
+ const instructions = try arena.allocator.dupe(*Inst, inner_block.instructions.items);
+ func.analysis = .{ .success = .{ .instructions = instructions } };
+ log.debug("set {} to success\n", .{decl.name});
+}
+
+fn markOutdatedDecl(self: *Module, decl: *Decl) !void {
+ log.debug("mark {} outdated\n", .{decl.name});
+ try self.comp.work_queue.writeItem(.{ .analyze_decl = decl });
+ if (self.failed_decls.remove(decl)) |entry| {
+ entry.value.destroy(self.gpa);
+ }
+ decl.analysis = .outdated;
+}
+
+fn allocateNewDecl(
+ self: *Module,
+ scope: *Scope,
+ src_index: usize,
+ contents_hash: std.zig.SrcHash,
+) !*Decl {
+ const new_decl = try self.gpa.create(Decl);
+ new_decl.* = .{
+ .name = "",
+ .scope = scope.namespace(),
+ .src_index = src_index,
+ .typed_value = .{ .never_succeeded = {} },
+ .analysis = .unreferenced,
+ .deletion_flag = false,
+ .contents_hash = contents_hash,
+ .link = switch (self.comp.bin_file.tag) {
+ .coff => .{ .coff = link.File.Coff.TextBlock.empty },
+ .elf => .{ .elf = link.File.Elf.TextBlock.empty },
+ .macho => .{ .macho = link.File.MachO.TextBlock.empty },
+ .c => .{ .c = {} },
+ .wasm => .{ .wasm = {} },
+ },
+ .fn_link = switch (self.comp.bin_file.tag) {
+ .coff => .{ .coff = {} },
+ .elf => .{ .elf = link.File.Elf.SrcFn.empty },
+ .macho => .{ .macho = link.File.MachO.SrcFn.empty },
+ .c => .{ .c = {} },
+ .wasm => .{ .wasm = null },
+ },
+ .generation = 0,
+ .is_pub = false,
+ };
+ return new_decl;
+}
+
+fn createNewDecl(
+ self: *Module,
+ scope: *Scope,
+ decl_name: []const u8,
+ src_index: usize,
+ name_hash: Scope.NameHash,
+ contents_hash: std.zig.SrcHash,
+) !*Decl {
+ try self.decl_table.ensureCapacity(self.gpa, self.decl_table.items().len + 1);
+ const new_decl = try self.allocateNewDecl(scope, src_index, contents_hash);
+ errdefer self.gpa.destroy(new_decl);
+ new_decl.name = try mem.dupeZ(self.gpa, u8, decl_name);
+ self.decl_table.putAssumeCapacityNoClobber(name_hash, new_decl);
+ return new_decl;
+}
+
+/// Get error value for error tag `name`.
+pub fn getErrorValue(self: *Module, name: []const u8) !std.StringHashMapUnmanaged(u16).Entry {
+ const gop = try self.global_error_set.getOrPut(self.gpa, name);
+ if (gop.found_existing)
+ return gop.entry.*;
+ errdefer self.global_error_set.removeAssertDiscard(name);
+
+ gop.entry.key = try self.gpa.dupe(u8, name);
+ gop.entry.value = @intCast(u16, self.global_error_set.count() - 1);
+ return gop.entry.*;
+}
+
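+/// Returns `scope` as a `Scope.Block`, or emits a compile error if the instruction
+/// appears outside a function body.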
+pub fn requireFunctionBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
+ return scope.cast(Scope.Block) orelse
+ return self.fail(scope, src, "instruction illegal outside function body", .{});
+}
+
+pub fn requireRuntimeBlock(self: *Module, scope: *Scope, src: usize) !*Scope.Block {
+ const block = try self.requireFunctionBlock(scope, src);
+ if (block.is_comptime) {
+ return self.fail(scope, src, "unable to resolve comptime value", .{});
+ }
+ return block;
+}
+
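+/// Resolves `base` to a comptime-known, defined value, emitting a compile error if it
+/// is runtime-known or undefined.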
+pub fn resolveConstValue(self: *Module, scope: *Scope, base: *Inst) !Value {
+ return (try self.resolveDefinedValue(scope, base)) orelse
+ return self.fail(scope, base.src, "unable to resolve comptime value", .{});
+}
+
+pub fn resolveDefinedValue(self: *Module, scope: *Scope, base: *Inst) !?Value {
+ if (base.value()) |val| {
+ if (val.isUndef()) {
+ return self.fail(scope, base.src, "use of undefined value here causes undefined behavior", .{});
+ }
+ return val;
+ }
+ return null;
+}
+
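+/// Creates an `Export` for `exported_decl` under `borrowed_symbol_name`, registering it
+/// in both the per-owner and per-decl export tables and informing the linker. Only
+/// functions can currently be exported; symbol name collisions are reported as compile
+/// errors rather than returned.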
+pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_name: []const u8, exported_decl: *Decl) !void {
+ try self.ensureDeclAnalyzed(exported_decl);
+ const typed_value = exported_decl.typed_value.most_recent.typed_value;
+ switch (typed_value.ty.zigTypeTag()) {
+ .Fn => {},
+ else => return self.fail(scope, src, "unable to export type '{}'", .{typed_value.ty}),
+ }
+
+ try self.decl_exports.ensureCapacity(self.gpa, self.decl_exports.items().len + 1);
+ try self.export_owners.ensureCapacity(self.gpa, self.export_owners.items().len + 1);
+
+ const new_export = try self.gpa.create(Export);
+ errdefer self.gpa.destroy(new_export);
+
+ const symbol_name = try self.gpa.dupe(u8, borrowed_symbol_name);
+ errdefer self.gpa.free(symbol_name);
+
+ const owner_decl = scope.decl().?;
+
+ new_export.* = .{
+ .options = .{ .name = symbol_name },
+ .src = src,
+ .link = .{},
+ .owner_decl = owner_decl,
+ .exported_decl = exported_decl,
+ .status = .in_progress,
+ };
+
+ // Add to export_owners table.
+ const eo_gop = self.export_owners.getOrPutAssumeCapacity(owner_decl);
+ if (!eo_gop.found_existing) {
+ eo_gop.entry.value = &[0]*Export{};
+ }
+ eo_gop.entry.value = try self.gpa.realloc(eo_gop.entry.value, eo_gop.entry.value.len + 1);
+ eo_gop.entry.value[eo_gop.entry.value.len - 1] = new_export;
+ errdefer eo_gop.entry.value = self.gpa.shrink(eo_gop.entry.value, eo_gop.entry.value.len - 1);
+
+ // Add to exported_decl table.
+ const de_gop = self.decl_exports.getOrPutAssumeCapacity(exported_decl);
+ if (!de_gop.found_existing) {
+ de_gop.entry.value = &[0]*Export{};
+ }
+ de_gop.entry.value = try self.gpa.realloc(de_gop.entry.value, de_gop.entry.value.len + 1);
+ de_gop.entry.value[de_gop.entry.value.len - 1] = new_export;
+ errdefer de_gop.entry.value = self.gpa.shrink(de_gop.entry.value, de_gop.entry.value.len - 1);
+
+ if (self.symbol_exports.get(symbol_name)) |_| {
+ try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
+ self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
+ self.gpa,
+ src,
+ "exported symbol collision: {}",
+ .{symbol_name},
+ ));
+ // TODO: add a note
+ new_export.status = .failed;
+ return;
+ }
+
+ try self.symbol_exports.putNoClobber(self.gpa, symbol_name, new_export);
+ self.comp.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => {
+ try self.failed_exports.ensureCapacity(self.gpa, self.failed_exports.items().len + 1);
+ self.failed_exports.putAssumeCapacityNoClobber(new_export, try Compilation.ErrorMsg.create(
+ self.gpa,
+ src,
+ "unable to export: {}",
+ .{@errorName(err)},
+ ));
+ new_export.status = .failed_retryable;
+ },
+ };
+}
+
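+// Each `add*` helper below allocates a runtime instruction in the block's arena,
+// initializes it, and appends it to the block's instruction list.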
+pub fn addNoOp(
+ self: *Module,
+ block: *Scope.Block,
+ src: usize,
+ ty: Type,
+ comptime tag: Inst.Tag,
+) !*Inst {
+ const inst = try block.arena.create(tag.Type());
+ inst.* = .{
+ .base = .{
+ .tag = tag,
+ .ty = ty,
+ .src = src,
+ },
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn addUnOp(
+ self: *Module,
+ block: *Scope.Block,
+ src: usize,
+ ty: Type,
+ tag: Inst.Tag,
+ operand: *Inst,
+) !*Inst {
+ const inst = try block.arena.create(Inst.UnOp);
+ inst.* = .{
+ .base = .{
+ .tag = tag,
+ .ty = ty,
+ .src = src,
+ },
+ .operand = operand,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn addBinOp(
+ self: *Module,
+ block: *Scope.Block,
+ src: usize,
+ ty: Type,
+ tag: Inst.Tag,
+ lhs: *Inst,
+ rhs: *Inst,
+) !*Inst {
+ const inst = try block.arena.create(Inst.BinOp);
+ inst.* = .{
+ .base = .{
+ .tag = tag,
+ .ty = ty,
+ .src = src,
+ },
+ .lhs = lhs,
+ .rhs = rhs,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn addArg(self: *Module, block: *Scope.Block, src: usize, ty: Type, name: [*:0]const u8) !*Inst {
+ const inst = try block.arena.create(Inst.Arg);
+ inst.* = .{
+ .base = .{
+ .tag = .arg,
+ .ty = ty,
+ .src = src,
+ },
+ .name = name,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn addBr(
+ self: *Module,
+ scope_block: *Scope.Block,
+ src: usize,
+ target_block: *Inst.Block,
+ operand: *Inst,
+) !*Inst {
+ const inst = try scope_block.arena.create(Inst.Br);
+ inst.* = .{
+ .base = .{
+ .tag = .br,
+ .ty = Type.initTag(.noreturn),
+ .src = src,
+ },
+ .operand = operand,
+ .block = target_block,
+ };
+ try scope_block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn addCondBr(
+ self: *Module,
+ block: *Scope.Block,
+ src: usize,
+ condition: *Inst,
+ then_body: ir.Body,
+ else_body: ir.Body,
+) !*Inst {
+ const inst = try block.arena.create(Inst.CondBr);
+ inst.* = .{
+ .base = .{
+ .tag = .condbr,
+ .ty = Type.initTag(.noreturn),
+ .src = src,
+ },
+ .condition = condition,
+ .then_body = then_body,
+ .else_body = else_body,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn addCall(
+ self: *Module,
+ block: *Scope.Block,
+ src: usize,
+ ty: Type,
+ func: *Inst,
+ args: []const *Inst,
+) !*Inst {
+ const inst = try block.arena.create(Inst.Call);
+ inst.* = .{
+ .base = .{
+ .tag = .call,
+ .ty = ty,
+ .src = src,
+ },
+ .func = func,
+ .args = args,
+ };
+ try block.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
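+// The `const*` helpers below create comptime-known constants. Unlike the `add*`
+// helpers they are not appended to any block, since a constant has no runtime effect.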
+pub fn constInst(self: *Module, scope: *Scope, src: usize, typed_value: TypedValue) !*Inst {
+ const const_inst = try scope.arena().create(Inst.Constant);
+ const_inst.* = .{
+ .base = .{
+ .tag = Inst.Constant.base_tag,
+ .ty = typed_value.ty,
+ .src = src,
+ },
+ .val = typed_value.val,
+ };
+ return &const_inst.base;
+}
+
+pub fn constType(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
+ return self.constInst(scope, src, .{
+ .ty = Type.initTag(.type),
+ .val = try ty.toValue(scope.arena()),
+ });
+}
+
+pub fn constVoid(self: *Module, scope: *Scope, src: usize) !*Inst {
+ return self.constInst(scope, src, .{
+ .ty = Type.initTag(.void),
+ .val = Value.initTag(.void_value),
+ });
+}
+
+pub fn constNoReturn(self: *Module, scope: *Scope, src: usize) !*Inst {
+ return self.constInst(scope, src, .{
+ .ty = Type.initTag(.noreturn),
+ .val = Value.initTag(.unreachable_value),
+ });
+}
+
+pub fn constUndef(self: *Module, scope: *Scope, src: usize, ty: Type) !*Inst {
+ return self.constInst(scope, src, .{
+ .ty = ty,
+ .val = Value.initTag(.undef),
+ });
+}
+
+pub fn constBool(self: *Module, scope: *Scope, src: usize, v: bool) !*Inst {
+ return self.constInst(scope, src, .{
+ .ty = Type.initTag(.bool),
+ .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)],
+ });
+}
+
+pub fn constIntUnsigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: u64) !*Inst {
+ const int_payload = try scope.arena().create(Value.Payload.Int_u64);
+ int_payload.* = .{ .int = int };
+
+ return self.constInst(scope, src, .{
+ .ty = ty,
+ .val = Value.initPayload(&int_payload.base),
+ });
+}
+
+pub fn constIntSigned(self: *Module, scope: *Scope, src: usize, ty: Type, int: i64) !*Inst {
+ const int_payload = try scope.arena().create(Value.Payload.Int_i64);
+ int_payload.* = .{ .int = int };
+
+ return self.constInst(scope, src, .{
+ .ty = ty,
+ .val = Value.initPayload(&int_payload.base),
+ });
+}
+
+pub fn constIntBig(self: *Module, scope: *Scope, src: usize, ty: Type, big_int: BigIntConst) !*Inst {
+ const val_payload = if (big_int.positive) blk: {
+ if (big_int.to(u64)) |x| {
+ return self.constIntUnsigned(scope, src, ty, x);
+ } else |err| switch (err) {
+ error.NegativeIntoUnsigned => unreachable,
+ error.TargetTooSmall => {}, // handled below
+ }
+ const big_int_payload = try scope.arena().create(Value.Payload.IntBigPositive);
+ big_int_payload.* = .{ .limbs = big_int.limbs };
+ break :blk &big_int_payload.base;
+ } else blk: {
+ if (big_int.to(i64)) |x| {
+ return self.constIntSigned(scope, src, ty, x);
+ } else |err| switch (err) {
+ error.NegativeIntoUnsigned => unreachable,
+ error.TargetTooSmall => {}, // handled below
+ }
+ const big_int_payload = try scope.arena().create(Value.Payload.IntBigNegative);
+ big_int_payload.* = .{ .limbs = big_int.limbs };
+ break :blk &big_int_payload.base;
+ };
+
+ return self.constInst(scope, src, .{
+ .ty = ty,
+ .val = Value.initPayload(val_payload),
+ });
+}
+
+pub fn createAnonymousDecl(
+ self: *Module,
+ scope: *Scope,
+ decl_arena: *std.heap.ArenaAllocator,
+ typed_value: TypedValue,
+) !*Decl {
+ const name_index = self.getNextAnonNameIndex();
+ const scope_decl = scope.decl().?;
+ const name = try std.fmt.allocPrint(self.gpa, "{}__anon_{}", .{ scope_decl.name, name_index });
+ defer self.gpa.free(name);
+ const name_hash = scope.namespace().fullyQualifiedNameHash(name);
+ const src_hash: std.zig.SrcHash = undefined;
+ const new_decl = try self.createNewDecl(scope, name, scope_decl.src_index, name_hash, src_hash);
+ const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State);
+
+ decl_arena_state.* = decl_arena.state;
+ new_decl.typed_value = .{
+ .most_recent = .{
+ .typed_value = typed_value,
+ .arena = decl_arena_state,
+ },
+ };
+ new_decl.analysis = .complete;
+ new_decl.generation = self.generation;
+
+    // TODO: This generates the Decl into the machine code file if it is of a type that is non-zero size.
+    // We should be able to further improve the compiler to omit Decls which are only referenced at
+    // compile-time and not at runtime.
+ if (typed_value.ty.hasCodeGenBits()) {
+ try self.comp.bin_file.allocateDeclIndexes(new_decl);
+ try self.comp.work_queue.writeItem(.{ .codegen_decl = new_decl });
+ }
+
+ return new_decl;
+}
+
+fn getNextAnonNameIndex(self: *Module) usize {
+ return @atomicRmw(usize, &self.next_anon_name_index, .Add, 1, .Monotonic);
+}
+
+pub fn lookupDeclName(self: *Module, scope: *Scope, ident_name: []const u8) ?*Decl {
+ const namespace = scope.namespace();
+ const name_hash = namespace.fullyQualifiedNameHash(ident_name);
+ return self.decl_table.get(name_hash);
+}
+
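+/// Analyzes a reference to `decl`, recording the dependency and marking the referring
+/// scope as dependency-failed if the decl cannot be analyzed. Produces a comptime
+/// single-item pointer, or a variable reference for `var` decls.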
+pub fn analyzeDeclRef(self: *Module, scope: *Scope, src: usize, decl: *Decl) InnerError!*Inst {
+ const scope_decl = scope.decl().?;
+ try self.declareDeclDependency(scope_decl, decl);
+ self.ensureDeclAnalyzed(decl) catch |err| {
+ if (scope.cast(Scope.Block)) |block| {
+ if (block.func) |func| {
+ func.analysis = .dependency_failure;
+ } else {
+ block.decl.analysis = .dependency_failure;
+ }
+ } else {
+ scope_decl.analysis = .dependency_failure;
+ }
+ return err;
+ };
+
+ const decl_tv = try decl.typedValue();
+ if (decl_tv.val.tag() == .variable) {
+ return self.analyzeVarRef(scope, src, decl_tv);
+ }
+ const ty = try self.simplePtrType(scope, src, decl_tv.ty, false, .One);
+ const val_payload = try scope.arena().create(Value.Payload.DeclRef);
+ val_payload.* = .{ .decl = decl };
+
+ return self.constInst(scope, src, .{
+ .ty = ty,
+ .val = Value.initPayload(&val_payload.base),
+ });
+}
+
+fn analyzeVarRef(self: *Module, scope: *Scope, src: usize, tv: TypedValue) InnerError!*Inst {
+ const variable = tv.val.cast(Value.Payload.Variable).?.variable;
+
+ const ty = try self.simplePtrType(scope, src, tv.ty, variable.is_mutable, .One);
+ if (!variable.is_mutable and !variable.is_extern) {
+ const val_payload = try scope.arena().create(Value.Payload.RefVal);
+ val_payload.* = .{ .val = variable.init };
+ return self.constInst(scope, src, .{
+ .ty = ty,
+ .val = Value.initPayload(&val_payload.base),
+ });
+ }
+
+ const b = try self.requireRuntimeBlock(scope, src);
+ const inst = try b.arena.create(Inst.VarPtr);
+ inst.* = .{
+ .base = .{
+ .tag = .varptr,
+ .ty = ty,
+ .src = src,
+ },
+ .variable = variable,
+ };
+ try b.instructions.append(self.gpa, &inst.base);
+ return &inst.base;
+}
+
+pub fn analyzeDeref(self: *Module, scope: *Scope, src: usize, ptr: *Inst, ptr_src: usize) InnerError!*Inst {
+ const elem_ty = switch (ptr.ty.zigTypeTag()) {
+ .Pointer => ptr.ty.elemType(),
+ else => return self.fail(scope, ptr_src, "expected pointer, found '{}'", .{ptr.ty}),
+ };
+ if (ptr.value()) |val| {
+ return self.constInst(scope, src, .{
+ .ty = elem_ty,
+ .val = try val.pointerDeref(scope.arena()),
+ });
+ }
+
+ const b = try self.requireRuntimeBlock(scope, src);
+ return self.addUnOp(b, src, elem_ty, .load, ptr);
+}
+
+pub fn analyzeDeclRefByName(self: *Module, scope: *Scope, src: usize, decl_name: []const u8) InnerError!*Inst {
+ const decl = self.lookupDeclName(scope, decl_name) orelse
+ return self.fail(scope, src, "decl '{}' not found", .{decl_name});
+ return self.analyzeDeclRef(scope, src, decl);
+}
+
+pub fn wantSafety(self: *Module, scope: *Scope) bool {
+ // TODO take into account scope's safety overrides
+ return switch (self.optimizeMode()) {
+ .Debug => true,
+ .ReleaseSafe => true,
+ .ReleaseFast => false,
+ .ReleaseSmall => false,
+ };
+}
+
+pub fn analyzeIsNull(
+ self: *Module,
+ scope: *Scope,
+ src: usize,
+ operand: *Inst,
+ invert_logic: bool,
+) InnerError!*Inst {
+ if (operand.value()) |opt_val| {
+ const is_null = opt_val.isNull();
+ const bool_value = if (invert_logic) !is_null else is_null;
+ return self.constBool(scope, src, bool_value);
+ }
+ const b = try self.requireRuntimeBlock(scope, src);
+ const inst_tag: Inst.Tag = if (invert_logic) .isnonnull else .isnull;
+ return self.addUnOp(b, src, Type.initTag(.bool), inst_tag, operand);
+}
+
+pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) InnerError!*Inst {
+ return self.fail(scope, src, "TODO implement analysis of iserr", .{});
+}
+
+pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
+ const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
+ .Pointer => array_ptr.ty.elemType(),
+ else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}),
+ };
+
+ var array_type = ptr_child;
+ const elem_type = switch (ptr_child.zigTypeTag()) {
+ .Array => ptr_child.elemType(),
+ .Pointer => blk: {
+ if (ptr_child.isSinglePointer()) {
+ if (ptr_child.elemType().zigTypeTag() == .Array) {
+ array_type = ptr_child.elemType();
+ break :blk ptr_child.elemType().elemType();
+ }
+
+ return self.fail(scope, src, "slice of single-item pointer", .{});
+ }
+ break :blk ptr_child.elemType();
+ },
+ else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}),
+ };
+
+ const slice_sentinel = if (sentinel_opt) |sentinel| blk: {
+ const casted = try self.coerce(scope, elem_type, sentinel);
+ break :blk try self.resolveConstValue(scope, casted);
+ } else null;
+
+ var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
+ var return_elem_type = elem_type;
+ if (end_opt) |end| {
+ if (end.value()) |end_val| {
+ if (start.value()) |start_val| {
+ const start_u64 = start_val.toUnsignedInt();
+ const end_u64 = end_val.toUnsignedInt();
+ if (start_u64 > end_u64) {
+ return self.fail(scope, src, "out of bounds slice", .{});
+ }
+
+ const len = end_u64 - start_u64;
+ const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
+ array_type.sentinel()
+ else
+ slice_sentinel;
+ return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type);
+ return_ptr_size = .One;
+ }
+ }
+ }
+ const return_type = try self.ptrType(
+ scope,
+ src,
+ return_elem_type,
+ if (end_opt == null) slice_sentinel else null,
+ 0, // TODO alignment
+ 0,
+ 0,
+ !ptr_child.isConstPtr(),
+ ptr_child.isAllowzeroPtr(),
+ ptr_child.isVolatilePtr(),
+ return_ptr_size,
+ );
+
+ return self.fail(scope, src, "TODO implement analysis of slice", .{});
+}
+
+/// Asserts that lhs and rhs types are both numeric.
+pub fn cmpNumeric(
+ self: *Module,
+ scope: *Scope,
+ src: usize,
+ lhs: *Inst,
+ rhs: *Inst,
+ op: std.math.CompareOperator,
+) !*Inst {
+ assert(lhs.ty.isNumeric());
+ assert(rhs.ty.isNumeric());
+
+ const lhs_ty_tag = lhs.ty.zigTypeTag();
+ const rhs_ty_tag = rhs.ty.zigTypeTag();
+
+ if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
+ if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) {
+ return self.fail(scope, src, "vector length mismatch: {} and {}", .{
+ lhs.ty.arrayLen(),
+ rhs.ty.arrayLen(),
+ });
+ }
+ return self.fail(scope, src, "TODO implement support for vectors in cmpNumeric", .{});
+ } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
+ return self.fail(scope, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
+ lhs.ty,
+ rhs.ty,
+ });
+ }
+
+ if (lhs.value()) |lhs_val| {
+ if (rhs.value()) |rhs_val| {
+ return self.constBool(scope, src, Value.compare(lhs_val, op, rhs_val));
+ }
+ }
+
+    // TODO handle comparisons against lazy zero values
+    // Some values can be compared against zero without being runtime-known and without forcing
+    // a full resolution of their value. For example, `@sizeOf(@Frame(function))` is known to
+    // always be nonzero, and we benefit from not forcing the full evaluation and stack frame
+    // layout of that function when we don't need to.
+
+ // It must be a runtime comparison.
+ const b = try self.requireRuntimeBlock(scope, src);
+ // For floats, emit a float comparison instruction.
+ const lhs_is_float = switch (lhs_ty_tag) {
+ .Float, .ComptimeFloat => true,
+ else => false,
+ };
+ const rhs_is_float = switch (rhs_ty_tag) {
+ .Float, .ComptimeFloat => true,
+ else => false,
+ };
+ if (lhs_is_float and rhs_is_float) {
+        // Implicitly cast the smaller operand to the larger type.
+ const dest_type = x: {
+ if (lhs_ty_tag == .ComptimeFloat) {
+ break :x rhs.ty;
+ } else if (rhs_ty_tag == .ComptimeFloat) {
+ break :x lhs.ty;
+ }
+ if (lhs.ty.floatBits(self.getTarget()) >= rhs.ty.floatBits(self.getTarget())) {
+ break :x lhs.ty;
+ } else {
+ break :x rhs.ty;
+ }
+ };
+ const casted_lhs = try self.coerce(scope, dest_type, lhs);
+ const casted_rhs = try self.coerce(scope, dest_type, rhs);
+ return self.addBinOp(b, src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
+ }
+    // For mixed unsigned integer sizes, implicitly cast both operands to the larger integer.
+    // For mixed signed and unsigned integers, implicitly cast both operands to a signed
+    // integer with one extra bit.
+    // For mixed floats and integers, extract the integer part from the float, cast that to
+    // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float,
+    // add/subtract 1.
+ const lhs_is_signed = if (lhs.value()) |lhs_val|
+ lhs_val.compareWithZero(.lt)
+ else
+ (lhs.ty.isFloat() or lhs.ty.isSignedInt());
+ const rhs_is_signed = if (rhs.value()) |rhs_val|
+ rhs_val.compareWithZero(.lt)
+ else
+ (rhs.ty.isFloat() or rhs.ty.isSignedInt());
+ const dest_int_is_signed = lhs_is_signed or rhs_is_signed;
+
+ var dest_float_type: ?Type = null;
+
+ var lhs_bits: usize = undefined;
+ if (lhs.value()) |lhs_val| {
+ if (lhs_val.isUndef())
+ return self.constUndef(scope, src, Type.initTag(.bool));
+ const is_unsigned = if (lhs_is_float) x: {
+ var bigint_space: Value.BigIntSpace = undefined;
+ var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
+ defer bigint.deinit();
+ const zcmp = lhs_val.orderAgainstZero();
+ if (lhs_val.floatHasFraction()) {
+ switch (op) {
+ .eq => return self.constBool(scope, src, false),
+ .neq => return self.constBool(scope, src, true),
+ else => {},
+ }
+ if (zcmp == .lt) {
+ try bigint.addScalar(bigint.toConst(), -1);
+ } else {
+ try bigint.addScalar(bigint.toConst(), 1);
+ }
+ }
+ lhs_bits = bigint.toConst().bitCountTwosComp();
+ break :x (zcmp != .lt);
+ } else x: {
+ lhs_bits = lhs_val.intBitCountTwosComp();
+ break :x (lhs_val.orderAgainstZero() != .lt);
+ };
+ lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
+ } else if (lhs_is_float) {
+ dest_float_type = lhs.ty;
+ } else {
+ const int_info = lhs.ty.intInfo(self.getTarget());
+ lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
+ }
+
+ var rhs_bits: usize = undefined;
+ if (rhs.value()) |rhs_val| {
+ if (rhs_val.isUndef())
+ return self.constUndef(scope, src, Type.initTag(.bool));
+ const is_unsigned = if (rhs_is_float) x: {
+ var bigint_space: Value.BigIntSpace = undefined;
+ var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(self.gpa);
+ defer bigint.deinit();
+ const zcmp = rhs_val.orderAgainstZero();
+ if (rhs_val.floatHasFraction()) {
+ switch (op) {
+ .eq => return self.constBool(scope, src, false),
+ .neq => return self.constBool(scope, src, true),
+ else => {},
+ }
+ if (zcmp == .lt) {
+ try bigint.addScalar(bigint.toConst(), -1);
+ } else {
+ try bigint.addScalar(bigint.toConst(), 1);
+ }
+ }
+ rhs_bits = bigint.toConst().bitCountTwosComp();
+ break :x (zcmp != .lt);
+ } else x: {
+ rhs_bits = rhs_val.intBitCountTwosComp();
+ break :x (rhs_val.orderAgainstZero() != .lt);
+ };
+ rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
+ } else if (rhs_is_float) {
+ dest_float_type = rhs.ty;
+ } else {
+ const int_info = rhs.ty.intInfo(self.getTarget());
+ rhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed);
+ }
+
+ const dest_type = if (dest_float_type) |ft| ft else blk: {
+ const max_bits = std.math.max(lhs_bits, rhs_bits);
+ const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) {
+ error.Overflow => return self.fail(scope, src, "{} exceeds maximum integer bit count", .{max_bits}),
+ };
+ break :blk try self.makeIntType(scope, dest_int_is_signed, casted_bits);
+ };
+ const casted_lhs = try self.coerce(scope, dest_type, lhs);
+ const casted_rhs = try self.coerce(scope, dest_type, rhs);
+
+ return self.addBinOp(b, src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs);
+}
+
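+/// Wraps `inst` into the optional type `dest_type`: comptime-known values keep their
+/// representation with the new type; runtime values get a `wrap_optional` instruction.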
+fn wrapOptional(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+ if (inst.value()) |val| {
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+
+ const b = try self.requireRuntimeBlock(scope, inst.src);
+ return self.addUnOp(b, inst.src, dest_type, .wrap_optional, inst);
+}
+
+fn makeIntType(self: *Module, scope: *Scope, signed: bool, bits: u16) !Type {
+ if (signed) {
+ const int_payload = try scope.arena().create(Type.Payload.IntSigned);
+ int_payload.* = .{ .bits = bits };
+ return Type.initPayload(&int_payload.base);
+ } else {
+ const int_payload = try scope.arena().create(Type.Payload.IntUnsigned);
+ int_payload.* = .{ .bits = bits };
+ return Type.initPayload(&int_payload.base);
+ }
+}
+
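+/// Computes the peer-resolved type of a set of instructions, e.g. for the result of a
+/// block with multiple breaks. `noreturn` and `undefined` peers defer to the other
+/// operand; same-signedness integers and floats resolve to the wider type, so `u8`
+/// and `u16` peers resolve to `u16`.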
+pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Type {
+ if (instructions.len == 0)
+ return Type.initTag(.noreturn);
+
+ if (instructions.len == 1)
+ return instructions[0].ty;
+
+ var prev_inst = instructions[0];
+ for (instructions[1..]) |next_inst| {
+ if (next_inst.ty.eql(prev_inst.ty))
+ continue;
+ if (next_inst.ty.zigTypeTag() == .NoReturn)
+ continue;
+ if (prev_inst.ty.zigTypeTag() == .NoReturn) {
+ prev_inst = next_inst;
+ continue;
+ }
+ if (next_inst.ty.zigTypeTag() == .Undefined)
+ continue;
+ if (prev_inst.ty.zigTypeTag() == .Undefined) {
+ prev_inst = next_inst;
+ continue;
+ }
+ if (prev_inst.ty.isInt() and
+ next_inst.ty.isInt() and
+ prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
+ {
+ if (prev_inst.ty.intInfo(self.getTarget()).bits < next_inst.ty.intInfo(self.getTarget()).bits) {
+ prev_inst = next_inst;
+ }
+ continue;
+ }
+ if (prev_inst.ty.isFloat() and next_inst.ty.isFloat()) {
+ if (prev_inst.ty.floatBits(self.getTarget()) < next_inst.ty.floatBits(self.getTarget())) {
+ prev_inst = next_inst;
+ }
+ continue;
+ }
+
+ // TODO error notes pointing out each type
+ return self.fail(scope, next_inst.src, "incompatible types: '{}' and '{}'", .{ prev_inst.ty, next_inst.ty });
+ }
+
+ return prev_inst.ty;
+}
+
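+/// Implicitly coerces `inst` to `dest_type`, preferring a no-op bitcast when the types
+/// are in-memory compatible and falling back to optional wrapping, array-pointer-to-slice
+/// conversion, and int/float widening. Emits a compile error when no coercion applies.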
+pub fn coerce(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+ // If the types are the same, we can return the operand.
+ if (dest_type.eql(inst.ty))
+ return inst;
+
+ const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
+ if (in_memory_result == .ok) {
+ return self.bitcast(scope, dest_type, inst);
+ }
+
+ // undefined to anything
+ if (inst.value()) |val| {
+ if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) {
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+ }
+ assert(inst.ty.zigTypeTag() != .Undefined);
+
+ // null to ?T
+ if (dest_type.zigTypeTag() == .Optional and inst.ty.zigTypeTag() == .Null) {
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
+ }
+
+ // T to ?T
+ if (dest_type.zigTypeTag() == .Optional) {
+ var buf: Type.Payload.PointerSimple = undefined;
+ const child_type = dest_type.optionalChild(&buf);
+ if (child_type.eql(inst.ty)) {
+ return self.wrapOptional(scope, dest_type, inst);
+ } else if (try self.coerceNum(scope, child_type, inst)) |some| {
+ return self.wrapOptional(scope, dest_type, some);
+ }
+ }
+
+ // *[N]T to []T
+ if (inst.ty.isSinglePointer() and dest_type.isSlice() and
+ (!inst.ty.isConstPtr() or dest_type.isConstPtr()))
+ {
+ const array_type = inst.ty.elemType();
+ const dst_elem_type = dest_type.elemType();
+ if (array_type.zigTypeTag() == .Array and
+ coerceInMemoryAllowed(dst_elem_type, array_type.elemType()) == .ok)
+ {
+ return self.coerceArrayPtrToSlice(scope, dest_type, inst);
+ }
+ }
+
+    // comptime-known number to other number
+ if (try self.coerceNum(scope, dest_type, inst)) |some|
+ return some;
+
+ // integer widening
+ if (inst.ty.zigTypeTag() == .Int and dest_type.zigTypeTag() == .Int) {
+ assert(inst.value() == null); // handled above
+
+ const src_info = inst.ty.intInfo(self.getTarget());
+ const dst_info = dest_type.intInfo(self.getTarget());
+        if ((src_info.signed == dst_info.signed and dst_info.bits >= src_info.bits) or
+            // small enough unsigned ints can get cast to large enough signed ints
+            (!src_info.signed and dst_info.signed and dst_info.bits > src_info.bits))
+ {
+ const b = try self.requireRuntimeBlock(scope, inst.src);
+ return self.addUnOp(b, inst.src, dest_type, .intcast, inst);
+ }
+ }
+
+ // float widening
+ if (inst.ty.zigTypeTag() == .Float and dest_type.zigTypeTag() == .Float) {
+ assert(inst.value() == null); // handled above
+
+ const src_bits = inst.ty.floatBits(self.getTarget());
+ const dst_bits = dest_type.floatBits(self.getTarget());
+ if (dst_bits >= src_bits) {
+ const b = try self.requireRuntimeBlock(scope, inst.src);
+ return self.addUnOp(b, inst.src, dest_type, .floatcast, inst);
+ }
+ }
+
+ return self.fail(scope, inst.src, "expected {}, found {}", .{ dest_type, inst.ty });
+}
+
+pub fn coerceNum(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !?*Inst {
+ const val = inst.value() orelse return null;
+ const src_zig_tag = inst.ty.zigTypeTag();
+ const dst_zig_tag = dest_type.zigTypeTag();
+
+ if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) {
+ if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
+ if (val.floatHasFraction()) {
+                return self.fail(scope, inst.src, "fractional component prevents float value {} from being cast to type '{}'", .{ val, dest_type });
+ }
+ return self.fail(scope, inst.src, "TODO float to int", .{});
+ } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
+ if (!val.intFitsInType(dest_type, self.getTarget())) {
+ return self.fail(scope, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val });
+ }
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+ } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
+ if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
+ const res = val.floatCast(scope.arena(), dest_type, self.getTarget()) catch |err| switch (err) {
+ error.Overflow => return self.fail(
+ scope,
+ inst.src,
+ "cast of value {} to type '{}' loses information",
+ .{ val, dest_type },
+ ),
+ error.OutOfMemory => return error.OutOfMemory,
+ };
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = res });
+ } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) {
+ return self.fail(scope, inst.src, "TODO int to float", .{});
+ }
+ }
+ return null;
+}
+
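+/// Stores `uncasted_value` through `ptr`, coercing it to the pointer's element type
+/// first. Stores to a type with only one possible value are elided as void.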
+pub fn storePtr(self: *Module, scope: *Scope, src: usize, ptr: *Inst, uncasted_value: *Inst) !*Inst {
+ if (ptr.ty.isConstPtr())
+ return self.fail(scope, src, "cannot assign to constant", .{});
+
+ const elem_ty = ptr.ty.elemType();
+ const value = try self.coerce(scope, elem_ty, uncasted_value);
+ if (elem_ty.onePossibleValue() != null)
+ return self.constVoid(scope, src);
+
+ // TODO handle comptime pointer writes
+ // TODO handle if the element type requires comptime
+
+ const b = try self.requireRuntimeBlock(scope, src);
+ return self.addBinOp(b, src, Type.initTag(.void), .store, ptr, value);
+}
+
+pub fn bitcast(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+ if (inst.value()) |val| {
+ // Keep the comptime Value representation; take the new type.
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+ // TODO validate the type size and other compile errors
+ const b = try self.requireRuntimeBlock(scope, inst.src);
+ return self.addUnOp(b, inst.src, dest_type, .bitcast, inst);
+}
+
+fn coerceArrayPtrToSlice(self: *Module, scope: *Scope, dest_type: Type, inst: *Inst) !*Inst {
+ if (inst.value()) |val| {
+ // The comptime Value representation is compatible with both types.
+ return self.constInst(scope, inst.src, .{ .ty = dest_type, .val = val });
+ }
+ return self.fail(scope, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{});
+}
+
+pub fn fail(self: *Module, scope: *Scope, src: usize, comptime format: []const u8, args: anytype) InnerError {
+ @setCold(true);
+ const err_msg = try Compilation.ErrorMsg.create(self.gpa, src, format, args);
+ return self.failWithOwnedErrorMsg(scope, src, err_msg);
+}
+
+pub fn failTok(
+ self: *Module,
+ scope: *Scope,
+ token_index: ast.TokenIndex,
+ comptime format: []const u8,
+ args: anytype,
+) InnerError {
+ @setCold(true);
+ const src = scope.tree().token_locs[token_index].start;
+ return self.fail(scope, src, format, args);
+}
+
+pub fn failNode(
+ self: *Module,
+ scope: *Scope,
+ ast_node: *ast.Node,
+ comptime format: []const u8,
+ args: anytype,
+) InnerError {
+ @setCold(true);
+ const src = scope.tree().token_locs[ast_node.firstToken()].start;
+ return self.fail(scope, src, format, args);
+}
+
+fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Compilation.ErrorMsg) InnerError {
+ {
+ errdefer err_msg.destroy(self.gpa);
+ try self.failed_decls.ensureCapacity(self.gpa, self.failed_decls.items().len + 1);
+ try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
+ }
+ switch (scope.tag) {
+ .decl => {
+ const decl = scope.cast(Scope.DeclAnalysis).?.decl;
+ decl.analysis = .sema_failure;
+ decl.generation = self.generation;
+ self.failed_decls.putAssumeCapacityNoClobber(decl, err_msg);
+ },
+ .block => {
+ const block = scope.cast(Scope.Block).?;
+ if (block.func) |func| {
+ func.analysis = .sema_failure;
+ } else {
+ block.decl.analysis = .sema_failure;
+ block.decl.generation = self.generation;
+ }
+ self.failed_decls.putAssumeCapacityNoClobber(block.decl, err_msg);
+ },
+ .gen_zir => {
+ const gen_zir = scope.cast(Scope.GenZIR).?;
+ gen_zir.decl.analysis = .sema_failure;
+ gen_zir.decl.generation = self.generation;
+ self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
+ },
+ .local_val => {
+ const gen_zir = scope.cast(Scope.LocalVal).?.gen_zir;
+ gen_zir.decl.analysis = .sema_failure;
+ gen_zir.decl.generation = self.generation;
+ self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
+ },
+ .local_ptr => {
+ const gen_zir = scope.cast(Scope.LocalPtr).?.gen_zir;
+ gen_zir.decl.analysis = .sema_failure;
+ gen_zir.decl.generation = self.generation;
+ self.failed_decls.putAssumeCapacityNoClobber(gen_zir.decl, err_msg);
+ },
+ .zir_module => {
+ const zir_module = scope.cast(Scope.ZIRModule).?;
+ zir_module.status = .loaded_sema_failure;
+ self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
+ },
+ .file => unreachable,
+ .container => unreachable,
+ }
+ return error.AnalysisFail;
+}
+
+const InMemoryCoercionResult = enum {
+ ok,
+ no_match,
+};
+
+fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult {
+ if (dest_type.eql(src_type))
+ return .ok;
+
+ // TODO: implement more of this function
+
+ return .no_match;
+}
+
+fn srcHashEql(a: std.zig.SrcHash, b: std.zig.SrcHash) bool {
+ return @bitCast(u128, a) == @bitCast(u128, b);
+}
+
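+// Comptime integer arithmetic goes through BigInt so that arbitrary-precision
+// `comptime_int` operands and fixed-width integers are handled uniformly.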
+pub fn intAdd(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try allocator.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.add(lhs_bigint, rhs_bigint);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ const val_payload = if (result_bigint.positive) blk: {
+ const val_payload = try allocator.create(Value.Payload.IntBigPositive);
+ val_payload.* = .{ .limbs = result_limbs };
+ break :blk &val_payload.base;
+ } else blk: {
+ const val_payload = try allocator.create(Value.Payload.IntBigNegative);
+ val_payload.* = .{ .limbs = result_limbs };
+ break :blk &val_payload.base;
+ };
+
+ return Value.initPayload(val_payload);
+}
+
+pub fn intSub(allocator: *Allocator, lhs: Value, rhs: Value) !Value {
+ // TODO is this a performance issue? maybe we should try the operation without
+ // resorting to BigInt first.
+ var lhs_space: Value.BigIntSpace = undefined;
+ var rhs_space: Value.BigIntSpace = undefined;
+ const lhs_bigint = lhs.toBigInt(&lhs_space);
+ const rhs_bigint = rhs.toBigInt(&rhs_space);
+ const limbs = try allocator.alloc(
+ std.math.big.Limb,
+ std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1,
+ );
+ var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
+ result_bigint.sub(lhs_bigint, rhs_bigint);
+ const result_limbs = result_bigint.limbs[0..result_bigint.len];
+
+ const val_payload = if (result_bigint.positive) blk: {
+ const val_payload = try allocator.create(Value.Payload.IntBigPositive);
+ val_payload.* = .{ .limbs = result_limbs };
+ break :blk &val_payload.base;
+ } else blk: {
+ const val_payload = try allocator.create(Value.Payload.IntBigNegative);
+ val_payload.* = .{ .limbs = result_limbs };
+ break :blk &val_payload.base;
+ };
+
+ return Value.initPayload(val_payload);
+}
+
+pub fn floatAdd(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
+    const bit_count = switch (float_type.tag()) {
+ .comptime_float => 128,
+ else => float_type.floatBits(self.getTarget()),
+ };
+
+ const allocator = scope.arena();
+ const val_payload = switch (bit_count) {
+ 16 => {
+ return self.fail(scope, src, "TODO Implement addition for soft floats", .{});
+ },
+ 32 => blk: {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ const val_payload = try allocator.create(Value.Payload.Float_32);
+ val_payload.* = .{ .val = lhs_val + rhs_val };
+ break :blk &val_payload.base;
+ },
+ 64 => blk: {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ const val_payload = try allocator.create(Value.Payload.Float_64);
+ val_payload.* = .{ .val = lhs_val + rhs_val };
+ break :blk &val_payload.base;
+ },
+ 128 => {
+ return self.fail(scope, src, "TODO Implement addition for big floats", .{});
+ },
+ else => unreachable,
+ };
+
+ return Value.initPayload(val_payload);
+}
+
+pub fn floatSub(self: *Module, scope: *Scope, float_type: Type, src: usize, lhs: Value, rhs: Value) !Value {
+    const bit_count = switch (float_type.tag()) {
+ .comptime_float => 128,
+ else => float_type.floatBits(self.getTarget()),
+ };
+
+ const allocator = scope.arena();
+ const val_payload = switch (bit_count) {
+ 16 => {
+            return self.fail(scope, src, "TODO Implement subtraction for soft floats", .{});
+ },
+ 32 => blk: {
+ const lhs_val = lhs.toFloat(f32);
+ const rhs_val = rhs.toFloat(f32);
+ const val_payload = try allocator.create(Value.Payload.Float_32);
+ val_payload.* = .{ .val = lhs_val - rhs_val };
+ break :blk &val_payload.base;
+ },
+ 64 => blk: {
+ const lhs_val = lhs.toFloat(f64);
+ const rhs_val = rhs.toFloat(f64);
+ const val_payload = try allocator.create(Value.Payload.Float_64);
+ val_payload.* = .{ .val = lhs_val - rhs_val };
+ break :blk &val_payload.base;
+ },
+ 128 => {
+            return self.fail(scope, src, "TODO Implement subtraction for big floats", .{});
+ },
+ else => unreachable,
+ };
+
+ return Value.initPayload(val_payload);
+}
+
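+/// Creates a pointer type that is fully described by a type tag plus a pointee type,
+/// special-casing `[]const u8` as the interned `const_slice_u8` tag.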
+pub fn simplePtrType(self: *Module, scope: *Scope, src: usize, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size) Allocator.Error!Type {
+ if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) {
+ return Type.initTag(.const_slice_u8);
+ }
+ // TODO stage1 type inference bug
+ const T = Type.Tag;
+
+ const type_payload = try scope.arena().create(Type.Payload.PointerSimple);
+ type_payload.* = .{
+ .base = .{
+ .tag = switch (size) {
+ .One => if (mutable) T.single_mut_pointer else T.single_const_pointer,
+ .Many => if (mutable) T.many_mut_pointer else T.many_const_pointer,
+ .C => if (mutable) T.c_mut_pointer else T.c_const_pointer,
+ .Slice => if (mutable) T.mut_slice else T.const_slice,
+ },
+ },
+ .pointee_type = elem_ty,
+ };
+ return Type.initPayload(&type_payload.base);
+}
+
+pub fn ptrType(
+ self: *Module,
+ scope: *Scope,
+ src: usize,
+ elem_ty: Type,
+ sentinel: ?Value,
+ @"align": u32,
+ bit_offset: u16,
+ host_size: u16,
+ mutable: bool,
+ @"allowzero": bool,
+ @"volatile": bool,
+ size: std.builtin.TypeInfo.Pointer.Size,
+) Allocator.Error!Type {
+ assert(host_size == 0 or bit_offset < host_size * 8);
+
+ // TODO check if type can be represented by simplePtrType
+ const type_payload = try scope.arena().create(Type.Payload.Pointer);
+ type_payload.* = .{
+ .pointee_type = elem_ty,
+ .sentinel = sentinel,
+ .@"align" = @"align",
+ .bit_offset = bit_offset,
+ .host_size = host_size,
+ .@"allowzero" = @"allowzero",
+ .mutable = mutable,
+ .@"volatile" = @"volatile",
+ .size = size,
+ };
+ return Type.initPayload(&type_payload.base);
+}
+
+pub fn optionalType(self: *Module, scope: *Scope, child_type: Type) Allocator.Error!Type {
+ return Type.initPayload(switch (child_type.tag()) {
+ .single_const_pointer => blk: {
+ const payload = try scope.arena().create(Type.Payload.PointerSimple);
+ payload.* = .{
+ .base = .{ .tag = .optional_single_const_pointer },
+ .pointee_type = child_type.elemType(),
+ };
+ break :blk &payload.base;
+ },
+ .single_mut_pointer => blk: {
+ const payload = try scope.arena().create(Type.Payload.PointerSimple);
+ payload.* = .{
+ .base = .{ .tag = .optional_single_mut_pointer },
+ .pointee_type = child_type.elemType(),
+ };
+ break :blk &payload.base;
+ },
+ else => blk: {
+ const payload = try scope.arena().create(Type.Payload.Optional);
+ payload.* = .{
+ .child_type = child_type,
+ };
+ break :blk &payload.base;
+ },
+ });
+}
+
+pub fn arrayType(self: *Module, scope: *Scope, len: u64, sentinel: ?Value, elem_type: Type) Allocator.Error!Type {
+ if (elem_type.eql(Type.initTag(.u8))) {
+ if (sentinel) |some| {
+ if (some.eql(Value.initTag(.zero))) {
+ const payload = try scope.arena().create(Type.Payload.Array_u8_Sentinel0);
+ payload.* = .{
+ .len = len,
+ };
+ return Type.initPayload(&payload.base);
+ }
+ } else {
+ const payload = try scope.arena().create(Type.Payload.Array_u8);
+ payload.* = .{
+ .len = len,
+ };
+ return Type.initPayload(&payload.base);
+ }
+ }
+
+ if (sentinel) |some| {
+ const payload = try scope.arena().create(Type.Payload.ArraySentinel);
+ payload.* = .{
+ .len = len,
+ .sentinel = some,
+ .elem_type = elem_type,
+ };
+ return Type.initPayload(&payload.base);
+ }
+
+ const payload = try scope.arena().create(Type.Payload.Array);
+ payload.* = .{
+ .len = len,
+ .elem_type = elem_type,
+ };
+ return Type.initPayload(&payload.base);
+}
+
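arrayType special-cases u8 element types so that string-like arrays stay compact: [N]u8 and [N:0]u8 payloads store only their length. A brief sketch under the same assumptions as above:

    // [5:0]u8 takes the compact u8-with-zero-sentinel path.
    const str_arr = try mod.arrayType(scope, 5, Value.initTag(.zero), Type.initTag(.u8));
    // Any other element type or sentinel falls through to the general
    // ArraySentinel / Array payloads, which also carry the element type.
    const int_arr = try mod.arrayType(scope, 4, null, Type.initTag(.u32));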
+pub fn errorUnionType(self: *Module, scope: *Scope, error_set: Type, payload: Type) Allocator.Error!Type {
+ assert(error_set.zigTypeTag() == .ErrorSet);
+ if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
+ return Type.initTag(.anyerror_void_error_union);
+ }
+
+ const result = try scope.arena().create(Type.Payload.ErrorUnion);
+ result.* = .{
+ .error_set = error_set,
+ .payload = payload,
+ };
+ return Type.initPayload(&result.base);
+}
+
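errorUnionType interns the most common error union so that anyerror!void never touches the arena. A sketch:

    // anyerror!void is interned; no allocation happens on this path.
    const eu = try mod.errorUnionType(scope, Type.initTag(.anyerror), Type.initTag(.void));
    assert(eu.tag() == .anyerror_void_error_union);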
+pub fn anyframeType(self: *Module, scope: *Scope, return_type: Type) Allocator.Error!Type {
+ const result = try scope.arena().create(Type.Payload.AnyFrame);
+ result.* = .{
+ .return_type = return_type,
+ };
+ return Type.initPayload(&result.base);
+}
+
+pub fn dumpInst(self: *Module, scope: *Scope, inst: *Inst) void {
+ const zir_module = scope.namespace();
+ const source = zir_module.getSource(self) catch @panic("dumpInst failed to get source");
+ const loc = std.zig.findLineColumn(source, inst.src);
+ if (inst.tag == .constant) {
+ std.debug.print("constant ty={} val={} src={}:{}:{}\n", .{
+ inst.ty,
+ inst.castTag(.constant).?.val,
+ zir_module.subFilePath(),
+ loc.line + 1,
+ loc.column + 1,
+ });
+ } else if (inst.deaths == 0) {
+ std.debug.print("{} ty={} src={}:{}:{}\n", .{
+ @tagName(inst.tag),
+ inst.ty,
+ zir_module.subFilePath(),
+ loc.line + 1,
+ loc.column + 1,
+ });
+ } else {
+ std.debug.print("{} ty={} deaths={b} src={}:{}:{}\n", .{
+ @tagName(inst.tag),
+ inst.ty,
+ inst.deaths,
+ zir_module.subFilePath(),
+ loc.line + 1,
+ loc.column + 1,
+ });
+ }
+}
+
+pub const PanicId = enum {
+ unreach,
+ unwrap_null,
+};
+
+pub fn addSafetyCheck(mod: *Module, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void {
+ const block_inst = try parent_block.arena.create(Inst.Block);
+ block_inst.* = .{
+ .base = .{
+ .tag = Inst.Block.base_tag,
+ .ty = Type.initTag(.void),
+ .src = ok.src,
+ },
+ .body = .{
+ .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the condbr.
+ },
+ };
+
+ const ok_body: ir.Body = .{
+ .instructions = try parent_block.arena.alloc(*Inst, 1), // Only need space for the brvoid.
+ };
+ const brvoid = try parent_block.arena.create(Inst.BrVoid);
+ brvoid.* = .{
+ .base = .{
+ .tag = .brvoid,
+ .ty = Type.initTag(.noreturn),
+ .src = ok.src,
+ },
+ .block = block_inst,
+ };
+ ok_body.instructions[0] = &brvoid.base;
+
+ var fail_block: Scope.Block = .{
+ .parent = parent_block,
+ .func = parent_block.func,
+ .decl = parent_block.decl,
+ .instructions = .{},
+ .arena = parent_block.arena,
+ .is_comptime = parent_block.is_comptime,
+ };
+ defer fail_block.instructions.deinit(mod.gpa);
+
+ _ = try mod.safetyPanic(&fail_block, ok.src, panic_id);
+
+ const fail_body: ir.Body = .{ .instructions = try parent_block.arena.dupe(*Inst, fail_block.instructions.items) };
+
+ const condbr = try parent_block.arena.create(Inst.CondBr);
+ condbr.* = .{
+ .base = .{
+ .tag = .condbr,
+ .ty = Type.initTag(.noreturn),
+ .src = ok.src,
+ },
+ .condition = ok,
+ .then_body = ok_body,
+ .else_body = fail_body,
+ };
+ block_inst.body.instructions[0] = &condbr.base;
+
+ try parent_block.instructions.append(mod.gpa, &block_inst.base);
+}
+
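addSafetyCheck wraps the boolean `ok` instruction in a void block whose single instruction is a condbr: the then-branch immediately br_voids back out of the block, and the else-branch holds the safetyPanic body. Roughly, the emitted IR has this shape (a hand-written sketch, not actual compiler output):

    // block (void) {
    //   condbr %ok
    //     then: { brvoid -> block }      // success path falls through
    //     else: { breakpoint; unreach }  // safetyPanic body
    // }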
+pub fn safetyPanic(mod: *Module, block: *Scope.Block, src: usize, panic_id: PanicId) !*Inst {
+ // TODO Once we have a panic function to call, call it here instead of breakpoint.
+ _ = try mod.addNoOp(block, src, Type.initTag(.void), .breakpoint);
+ return mod.addNoOp(block, src, Type.initTag(.noreturn), .unreach);
+}
+
+pub fn getTarget(self: Module) Target {
+ return self.comp.bin_file.options.target;
+}
+
+pub fn optimizeMode(self: Module) std.builtin.Mode {
+ return self.comp.bin_file.options.optimize_mode;
+}
src-self-hosted/zir.zig
@@ -10,7 +10,7 @@ const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const TypedValue = @import("TypedValue.zig");
const ir = @import("ir.zig");
-const IrModule = @import("Module.zig");
+const IrModule = @import("ZigModule.zig");
/// This struct is relevant only for the ZIR Module text format. It is not used for
/// semantic analysis of Zig source code.
src-self-hosted/zir_sema.zig
@@ -16,7 +16,7 @@ const TypedValue = @import("TypedValue.zig");
const assert = std.debug.assert;
const ir = @import("ir.zig");
const zir = @import("zir.zig");
-const Module = @import("Module.zig");
+const Module = @import("ZigModule.zig");
const Inst = ir.Inst;
const Body = ir.Body;
const trace = @import("tracy.zig").trace;
@@ -199,10 +199,10 @@ pub fn analyzeZirDecl(mod: *Module, decl: *Decl, src_decl: *zir.Decl) InnerError
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency order,
// increasing how many computations can be done in parallel.
- try mod.bin_file.allocateDeclIndexes(decl);
- try mod.work_queue.writeItem(.{ .codegen_decl = decl });
+ try mod.comp.bin_file.allocateDeclIndexes(decl);
+ try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl });
} else if (prev_type_has_bits) {
- mod.bin_file.freeDecl(decl);
+ mod.comp.bin_file.freeDecl(decl);
}
return type_changed;
test/stage2/test.zig
@@ -1,8 +1,11 @@
const std = @import("std");
const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
-// self-hosted does not yet support PE executable files / COFF object files
-// or mach-o files. So we do these test cases cross compiling for x86_64-linux.
+// Self-hosted has differing levels of support for various architectures, so for now each test
+// case passes an explicit target and applies only to that target. At some point we will take
+// this to the next level and have a default set of targets that every test case runs on
+// unless it specifically overrides them.
+
const linux_x64 = std.zig.CrossTarget{
.cpu_arch = .x86_64,
.os_tag = .linux,