Commit 4056bb92e6
Changed files (13)
lib/std/cache_hash.zig
@@ -5,7 +5,6 @@
// and substantial portions of the software.
const std = @import("std.zig");
const crypto = std.crypto;
-const Hasher = crypto.auth.siphash.SipHash128(1, 3); // provides enough collision resistance for the CacheHash use cases, while being one of our fastest options right now
const fs = std.fs;
const base64 = std.base64;
const ArrayList = std.ArrayList;
@@ -23,6 +22,14 @@ const BASE64_DIGEST_LEN = base64.Base64Encoder.calcSize(BIN_DIGEST_LEN);
const MANIFEST_FILE_SIZE_MAX = 50 * 1024 * 1024;
+/// The type used for hashing file contents. Currently, this is SipHash128(1, 3), because it
+/// provides enough collision resistance for the CacheHash use cases, while being one of our
+/// fastest options right now.
+pub const Hasher = crypto.auth.siphash.SipHash128(1, 3);
+
+/// Initial hash state that can be copied.
+pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.minimum_key_length);
+
pub const File = struct {
path: ?[]const u8,
max_file_size: ?usize,
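Making `Hasher` and `hasher_init` public lets callers copy the pre-keyed SipHash state instead of re-running key setup, which is the same trick `hashFile` and the cache itself use further down. A tiny sketch, assuming `std.cache_hash` is importable the way `Module.zig` uses it below:

```zig
const std = @import("std");

test "copying the shared initial hasher state" {
    // Value copy of the zero-keyed SipHash128(1, 3) state; no key setup is repeated.
    var hasher = std.cache_hash.hasher_init;
    hasher.update("inputs common to every unit of work");

    // Forking the state is just another copy, mirroring what CacheHash.clone does.
    var per_file = hasher;
    per_file.update("per-file bytes");
}
```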
@@ -45,52 +52,82 @@ pub const File = struct {
/// CacheHash manages project-local `zig-cache` directories.
/// This is not a general-purpose cache.
-/// It was designed to be fast and simple, not to withstand attacks using specially-crafted input.
+/// It is designed to be fast and simple, not to withstand attacks using specially-crafted input.
pub const CacheHash = struct {
allocator: *Allocator,
- hasher_init: Hasher, // initial state, that can be copied
- hasher: Hasher, // current state for incremental hashing
+ /// Current state for incremental hashing.
+ hasher: Hasher,
manifest_dir: fs.Dir,
manifest_file: ?fs.File,
manifest_dirty: bool,
+ owns_manifest_dir: bool,
files: ArrayList(File),
b64_digest: [BASE64_DIGEST_LEN]u8,
/// Be sure to call release after successful initialization.
pub fn init(allocator: *Allocator, dir: fs.Dir, manifest_dir_path: []const u8) !CacheHash {
- const hasher_init = Hasher.init(&[_]u8{0} ** Hasher.minimum_key_length);
return CacheHash{
.allocator = allocator,
- .hasher_init = hasher_init,
.hasher = hasher_init,
.manifest_dir = try dir.makeOpenPath(manifest_dir_path, .{}),
.manifest_file = null,
.manifest_dirty = false,
+ .owns_manifest_dir = true,
+ .files = ArrayList(File).init(allocator),
+ .b64_digest = undefined,
+ };
+ }
+
+ /// Forks this CacheHash into a new instance that shares the parent's manifest directory handle
+ /// instead of opening another one. The new instance inherits the current hash state.
+ pub fn clone(self: CacheHash) CacheHash {
+ assert(self.manifest_file == null);
+ assert(self.files.items.len == 0);
+ return .{
+ .allocator = self.allocator,
+ .hasher = self.hasher,
+ .manifest_dir = self.manifest_dir,
+ .manifest_file = null,
+ .manifest_dirty = false,
+ .owns_manifest_dir = false,
.files = ArrayList(File).init(self.allocator),
.b64_digest = undefined,
};
}
/// Record a slice of bytes as a dependency of the process being cached
- pub fn addSlice(self: *CacheHash, val: []const u8) void {
+ pub fn addBytes(self: *CacheHash, bytes: []const u8) void {
assert(self.manifest_file == null);
- self.hasher.update(val);
- self.hasher.update(&[_]u8{0});
+ self.hasher.update(mem.asBytes(&bytes.len));
+ self.hasher.update(bytes);
}
- /// Convert the input value into bytes and record it as a dependency of the
- /// process being cached
- pub fn add(self: *CacheHash, val: anytype) void {
+ pub fn addListOfBytes(self: *CacheHash, list_of_bytes: []const []const u8) void {
assert(self.manifest_file == null);
- const valPtr = switch (@typeInfo(@TypeOf(val))) {
- .Int => &val,
- .Pointer => val,
- else => &val,
- };
+ self.add(list_of_bytes.len);
+ for (list_of_bytes) |bytes| self.addBytes(bytes);
+ }
+
+ /// Convert the input value into bytes and record it as a dependency of the process being cached.
+ pub fn add(self: *CacheHash, x: anytype) void {
+ assert(self.manifest_file == null);
+
+ switch (@TypeOf(x)) {
+ std.builtin.Version => {
+ self.add(x.major);
+ self.add(x.minor);
+ self.add(x.patch);
+ return;
+ },
+ else => {},
+ }
- self.addSlice(mem.asBytes(valPtr));
+ switch (@typeInfo(@TypeOf(x))) {
+ .Bool, .Int, .Enum, .Array => self.addBytes(mem.asBytes(&x)),
+ else => @compileError("unable to hash type " ++ @typeName(@TypeOf(x))),
+ }
}
/// Add a file as a dependency of the process being cached. When `CacheHash.hit` is
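The old `addSlice` framed inputs with a trailing zero byte; the renamed `addBytes` now hashes the length followed by the bytes, and typed values (including `std.builtin.Version`) go through `add`. A minimal usage sketch, assuming `std.cache_hash` is reachable as in `Module.zig` and with a hypothetical cache directory name:

```zig
const std = @import("std");
const CacheHash = std.cache_hash.CacheHash;

// Sketch only: directory name and inputs are hypothetical.
fn demoCacheInputs(gpa: *std.mem.Allocator) !void {
    var ch = try CacheHash.init(gpa, std.fs.cwd(), "zig-cache");
    defer ch.release();

    ch.add(true); // typed values are converted with mem.asBytes
    ch.add(@as(u16, 1234));
    ch.add(std.builtin.Version{ .major = 0, .minor = 7, .patch = 0 }); // special-cased in `add`
    ch.addBytes("raw byte string input"); // length first, then the bytes
    ch.addListOfBytes(&[_][]const u8{ "-DFOO", "-O2" });

    // `hit` returns null on a cache miss and keeps the manifest open until `release`.
    const maybe_digest = try ch.hit();
    _ = maybe_digest;
}
```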
@@ -122,7 +159,7 @@ pub const CacheHash = struct {
.bin_digest = undefined,
};
- self.addSlice(resolved_path);
+ self.addBytes(resolved_path);
return idx;
}
@@ -143,7 +180,7 @@ pub const CacheHash = struct {
base64_encoder.encode(self.b64_digest[0..], &bin_digest);
- self.hasher = self.hasher_init;
+ self.hasher = hasher_init;
self.hasher.update(&bin_digest);
const manifest_file_path = try fmt.allocPrint(self.allocator, "{}.txt", .{self.b64_digest});
@@ -244,7 +281,7 @@ pub const CacheHash = struct {
}
var actual_digest: [BIN_DIGEST_LEN]u8 = undefined;
- try hashFile(this_file, &actual_digest, self.hasher_init);
+ try hashFile(this_file, &actual_digest);
if (!mem.eql(u8, &cache_hash_file.bin_digest, &actual_digest)) {
cache_hash_file.bin_digest = actual_digest;
@@ -262,7 +299,7 @@ pub const CacheHash = struct {
// cache miss
// keep the manifest file open
// reset the hash
- self.hasher = self.hasher_init;
+ self.hasher = hasher_init;
self.hasher.update(&bin_digest);
// Remove files not in the initial hash
@@ -310,7 +347,7 @@ pub const CacheHash = struct {
// Hash while reading from disk, to keep the contents in the cpu cache while
// doing hashing.
- var hasher = self.hasher_init;
+ var hasher = hasher_init;
var off: usize = 0;
while (true) {
// give me everything you've got, captain
@@ -323,7 +360,7 @@ pub const CacheHash = struct {
ch_file.contents = contents;
} else {
- try hashFile(file, &ch_file.bin_digest, self.hasher_init);
+ try hashFile(file, &ch_file.bin_digest);
}
self.hasher.update(&ch_file.bin_digest);
@@ -435,11 +472,12 @@ pub const CacheHash = struct {
file.deinit(self.allocator);
}
self.files.deinit();
- self.manifest_dir.close();
+ if (self.owns_manifest_dir)
+ self.manifest_dir.close();
}
};
-fn hashFile(file: fs.File, bin_digest: []u8, hasher_init: anytype) !void {
+fn hashFile(file: fs.File, bin_digest: []u8) !void {
var buf: [1024]u8 = undefined;
var hasher = hasher_init;
@@ -509,7 +547,7 @@ test "cache file and then recall it" {
ch.add(true);
ch.add(@as(u16, 1234));
- ch.add("1234");
+ ch.addBytes("1234");
_ = try ch.addFile(temp_file, null);
// There should be nothing in the cache
@@ -523,7 +561,7 @@ test "cache file and then recall it" {
ch.add(true);
ch.add(@as(u16, 1234));
- ch.add("1234");
+ ch.addBytes("1234");
_ = try ch.addFile(temp_file, null);
// Cache hit! We just "built" the same file
@@ -577,7 +615,7 @@ test "check that changing a file makes cache fail" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
const temp_file_idx = try ch.addFile(temp_file, 100);
// There should be nothing in the cache
@@ -594,7 +632,7 @@ test "check that changing a file makes cache fail" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
const temp_file_idx = try ch.addFile(temp_file, 100);
// A file that we depend on has been updated, so the cache should not contain an entry for it
@@ -628,7 +666,7 @@ test "no file inputs" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
// There should be nothing in the cache
testing.expectEqual(@as(?[BASE64_DIGEST_LEN]u8, null), try ch.hit());
@@ -639,7 +677,7 @@ test "no file inputs" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
digest2 = (try ch.hit()).?;
}
@@ -674,7 +712,7 @@ test "CacheHashes with files added after initial hash work" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
_ = try ch.addFile(temp_file1, null);
// There should be nothing in the cache
@@ -688,7 +726,7 @@ test "CacheHashes with files added after initial hash work" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
_ = try ch.addFile(temp_file1, null);
digest2 = (try ch.hit()).?;
@@ -707,7 +745,7 @@ test "CacheHashes with files added after initial hash work" {
var ch = try CacheHash.init(testing.allocator, cwd, temp_manifest_dir);
defer ch.release();
- ch.add("1234");
+ ch.addBytes("1234");
_ = try ch.addFile(temp_file1, null);
// A file that we depend on has been updated, so the cache should not contain an entry for it
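The new `clone` pairs with the shared hash state: a base `CacheHash` can be seeded once with inputs common to the whole compilation and then forked per unit of work without opening another manifest directory handle. A sketch of that pattern (helper name and inputs are hypothetical):

```zig
const std = @import("std");
const CacheHash = std.cache_hash.CacheHash;

// Hypothetical helper: fork the shared cache state for one source file.
fn hashOneInput(base: *const CacheHash, src_path: []const u8) !bool {
    var ch = base.clone(); // inherits the common hash prefix and shares manifest_dir
    defer ch.release();
    ch.addBytes(src_path);
    const hit = (try ch.hit()) != null; // null means a cache miss for this input
    return hit;
}
```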
lib/std/target.zig
@@ -465,8 +465,6 @@ pub const Target = struct {
};
pub const ObjectFormat = enum {
- /// TODO Get rid of this one.
- unknown,
coff,
pe,
elf,
lib/std/zig.zig
@@ -71,17 +71,52 @@ pub fn binNameAlloc(
target: std.Target,
output_mode: std.builtin.OutputMode,
link_mode: ?std.builtin.LinkMode,
+ object_format: ?std.Target.ObjectFormat,
) error{OutOfMemory}![]u8 {
- switch (output_mode) {
- .Exe => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.exeFileExt() }),
- .Lib => {
- const suffix = switch (link_mode orelse .Static) {
- .Static => target.staticLibSuffix(),
- .Dynamic => target.dynamicLibSuffix(),
- };
- return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
+ switch (object_format orelse target.getObjectFormat()) {
+ .coff, .pe => switch (output_mode) {
+ .Exe => {
+ const suffix = switch (target.os.tag) {
+ .uefi => ".efi",
+ else => ".exe",
+ };
+ return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, suffix });
+ },
+ .Lib => {
+ const suffix = switch (link_mode orelse .Static) {
+ .Static => ".lib",
+ .Dynamic => ".dll",
+ };
+ return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
+ },
+ .Obj => return std.fmt.allocPrint(allocator, "{}.obj", .{root_name}),
+ },
+ .elf => switch (output_mode) {
+ .Exe => return allocator.dupe(u8, root_name),
+ .Lib => {
+ const suffix = switch (link_mode orelse .Static) {
+ .Static => ".a",
+ .Dynamic => ".so",
+ };
+ return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
+ },
+ .Obj => return std.fmt.allocPrint(allocator, "{}.o", .{root_name}),
+ },
+ .macho => switch (output_mode) {
+ .Exe => return allocator.dupe(u8, root_name),
+ .Lib => {
+ const suffix = switch (link_mode orelse .Static) {
+ .Static => ".a",
+ .Dynamic => ".dylib",
+ };
+ return std.fmt.allocPrint(allocator, "{}{}{}", .{ target.libPrefix(), root_name, suffix });
+ },
+ .Obj => return std.fmt.allocPrint(allocator, "{}.o", .{root_name}),
},
- .Obj => return std.fmt.allocPrint(allocator, "{}{}", .{ root_name, target.oFileExt() }),
+ .wasm => return std.fmt.allocPrint(allocator, "{}.wasm", .{root_name}),
+ .c => return std.fmt.allocPrint(allocator, "{}.c", .{root_name}),
+ .hex => return std.fmt.allocPrint(allocator, "{}.ihex", .{root_name}),
+ .raw => return std.fmt.allocPrint(allocator, "{}.bin", .{root_name}),
}
}
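With the added `object_format` parameter the caller can pick the extension independently of the target, which is what `main.zig` below relies on for the `-femit-bin` default path. A small sketch of the new call shape (values are illustrative and assume the contemporaneous `std.Target.current`):

```zig
const std = @import("std");

test "binNameAlloc chooses the extension from the object format" {
    const gpa = std.testing.allocator;
    const target = std.Target.current; // assumes this constant exists in the std of this era

    // An explicit object format overrides whatever the target would imply;
    // passing null falls back to target.getObjectFormat().
    const obj_name = try std.zig.binNameAlloc(gpa, "hello", target, .Obj, null, .elf);
    defer gpa.free(obj_name);
    std.testing.expect(std.mem.eql(u8, obj_name, "hello.o"));

    const c_name = try std.zig.binNameAlloc(gpa, "hello", target, .Obj, null, .c);
    defer gpa.free(c_name);
    std.testing.expect(std.mem.eql(u8, c_name, "hello.c"));
}
```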
src-self-hosted/link/C.zig
@@ -25,6 +25,9 @@ error_msg: *Module.ErrorMsg = undefined,
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
assert(options.object_format == .c);
+ if (options.use_llvm) return error.LLVM_HasNoCBackend;
+ if (options.use_lld) return error.LLD_HasNoCBackend;
+
const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
src-self-hosted/link/Coff.zig
@@ -113,6 +113,9 @@ pub const SrcFn = void;
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
assert(options.object_format == .coff);
+ if (options.use_llvm) return error.LLVM_BackendIsTODO_ForCoff; // TODO
+ if (options.use_lld) return error.LLD_LinkingIsTODO_ForCoff; // TODO
+
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
src-self-hosted/link/Elf.zig
@@ -219,6 +219,9 @@ pub const SrcFn = struct {
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
assert(options.object_format == .elf);
+ if (options.use_llvm) return error.LLVM_BackendIsTODO_ForELF; // TODO
+ if (options.use_lld) return error.LLD_LinkingIsTODOForELF; // TODO
+
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
@@ -235,7 +238,7 @@ pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, option
/// Returns error.IncrFailed if incremental update could not be performed.
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf {
- switch (options.output_mode) {
+ switch (options.effectiveOutputMode()) {
.Exe => {},
.Obj => {},
.Lib => return error.IncrFailed,
@@ -264,7 +267,7 @@ fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf {
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Elf {
- switch (options.output_mode) {
+ switch (options.effectiveOutputMode()) {
.Exe => {},
.Obj => {},
.Lib => return error.TODOImplementWritingLibFiles,
@@ -861,8 +864,8 @@ pub fn flush(self: *Elf, module: *Module) !void {
},
}
// Write the form for the compile unit, which must match the abbrev table above.
- const name_strp = try self.makeDebugString(self.base.options.root_pkg.root_src_path);
- const comp_dir_strp = try self.makeDebugString(self.base.options.root_pkg.root_src_dir_path);
+ const name_strp = try self.makeDebugString(self.base.options.root_pkg.?.root_src_path);
+ const comp_dir_strp = try self.makeDebugString(self.base.options.root_pkg.?.root_src_dir_path);
const producer_strp = try self.makeDebugString(link.producer_string);
// Currently only one compilation unit is supported, so the address range is simply
// identical to the main program header virtual address and memory size.
@@ -1031,7 +1034,7 @@ pub fn flush(self: *Elf, module: *Module) !void {
0, // include_directories (none except the compilation unit cwd)
});
// file_names[0]
- di_buf.appendSliceAssumeCapacity(self.base.options.root_pkg.root_src_path); // relative path name
+ di_buf.appendSliceAssumeCapacity(self.base.options.root_pkg.?.root_src_path); // relative path name
di_buf.appendSliceAssumeCapacity(&[_]u8{
0, // null byte for the relative path name
0, // directory_index
@@ -1195,7 +1198,7 @@ pub fn flush(self: *Elf, module: *Module) !void {
}
self.shdr_table_dirty = false;
}
- if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
+ if (self.entry_addr == null and self.base.options.effectiveOutputMode() == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
@@ -1255,7 +1258,7 @@ fn writeElfHeader(self: *Elf) !void {
assert(index == 16);
- const elf_type = switch (self.base.options.output_mode) {
+ const elf_type = switch (self.base.options.effectiveOutputMode()) {
.Exe => elf.ET.EXEC,
.Obj => elf.ET.REL,
.Lib => switch (self.base.options.link_mode) {
@@ -2430,8 +2433,8 @@ fn dbgLineNeededHeaderBytes(self: Elf) u32 {
directory_count * 8 + file_name_count * 8 +
// These are encoded as DW.FORM_string rather than DW.FORM_strp as we would like
// because of a workaround for readelf and gdb failing to understand DWARFv5 correctly.
- self.base.options.root_pkg.root_src_dir_path.len +
- self.base.options.root_pkg.root_src_path.len);
+ self.base.options.root_pkg.?.root_src_dir_path.len +
+ self.base.options.root_pkg.?.root_src_path.len);
}
fn dbgInfoNeededHeaderBytes(self: Elf) u32 {
src-self-hosted/link/MachO.zig
@@ -137,6 +137,9 @@ pub const SrcFn = struct {
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*File {
assert(options.object_format == .macho);
+ if (options.use_llvm) return error.LLVM_BackendIsTODO_ForMachO; // TODO
+ if (options.use_lld) return error.LLD_LinkingIsTODO_ForMachO; // TODO
+
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
src-self-hosted/link/Wasm.zig
@@ -52,6 +52,9 @@ funcs: std.ArrayListUnmanaged(*Module.Decl) = .{},
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
assert(options.object_format == .wasm);
+ if (options.use_llvm) return error.LLVM_BackendIsTODO_ForWasm; // TODO
+ if (options.use_lld) return error.LLD_LinkingIsTODO_ForWasm; // TODO
+
// TODO: read the file and keep valid parts instead of truncating
const file = try dir.createFile(sub_path, .{ .truncate = true, .read = true });
errdefer file.close();
src-self-hosted/link.zig
@@ -16,14 +16,36 @@ pub const Options = struct {
object_format: std.builtin.ObjectFormat,
optimize_mode: std.builtin.Mode,
root_name: []const u8,
- root_pkg: *const Package,
+ root_pkg: ?*const Package,
/// Used for calculating how much space to reserve for symbols in case the binary file
/// does not already have a symbol table.
symbol_count_hint: u64 = 32,
/// Used for calculating how much space to reserve for executable program code in case
- /// the binary file deos not already have such a section.
+ /// the binary file does not already have such a section.
program_code_size_hint: u64 = 256 * 1024,
entry_addr: ?u64 = null,
+ /// Set to `true` to omit debug info.
+ strip: bool = false,
+ /// If this is true then this link code is responsible for outputting an object
+ /// file and then using LLD to link it together with the link options and other objects.
+ /// Otherwise (depending on `use_llvm`) this link code directly outputs and updates the final binary.
+ use_lld: bool = false,
+ /// If this is true then this link code is responsible for making an LLVM IR Module,
+ /// outputting it to an object file, and then linking that together with link options and
+ /// other objects.
+ /// Otherwise (depending on `use_lld`) this link code directly outputs and updates the final binary.
+ use_llvm: bool = false,
+
+ objects: []const []const u8 = &[0][]const u8{},
+ framework_dirs: []const []const u8 = &[0][]const u8{},
+ frameworks: []const []const u8 = &[0][]const u8{},
+ system_libs: []const []const u8 = &[0][]const u8{},
+ lib_dirs: []const []const u8 = &[0][]const u8{},
+ rpath_list: []const []const u8 = &[0][]const u8{},
+
+ pub fn effectiveOutputMode(options: Options) std.builtin.OutputMode {
+ return if (options.use_lld) .Obj else options.output_mode;
+ }
};
pub const File = struct {
@@ -67,14 +89,13 @@ pub const File = struct {
/// and does not cause Illegal Behavior. This operation is not atomic.
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
switch (options.object_format) {
- .unknown => unreachable,
.coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
.c => return C.openPath(allocator, dir, sub_path, options),
- .hex => return error.TODOImplementHex,
- .raw => return error.TODOImplementRaw,
+ .hex => return error.HexObjectFormatUnimplemented,
+ .raw => return error.RawObjectFormatUnimplemented,
}
}
src-self-hosted/main.zig
@@ -13,6 +13,7 @@ const Package = @import("Package.zig");
const zir = @import("zir.zig");
const build_options = @import("build_options");
const warn = std.log.warn;
+const introspect = @import("introspect.zig");
fn fatal(comptime format: []const u8, args: anytype) noreturn {
std.log.emerg(format, args);
@@ -231,7 +232,6 @@ pub fn buildOutputType(
var root_src_file: ?[]const u8 = null;
var version: std.builtin.Version = .{ .major = 0, .minor = 0, .patch = 0 };
var strip = false;
- var emit_h = true;
var watch = false;
var debug_tokenize = false;
var debug_ast_tree = false;
@@ -248,6 +248,7 @@ pub fn buildOutputType(
var target_dynamic_linker: ?[]const u8 = null;
var target_ofmt: ?[]const u8 = null;
var output_mode: std.builtin.OutputMode = undefined;
+ var emit_h: Emit = undefined;
var ensure_libc_on_non_freestanding = false;
var ensure_libcpp_on_non_freestanding = false;
var have_libc = false;
@@ -269,6 +270,9 @@ pub fn buildOutputType(
var linker_z_nodelete = false;
var linker_z_defs = false;
var stack_size_override: u64 = 0;
+ var use_llvm: ?bool = null;
+ var use_lld: ?bool = null;
+ var use_clang: ?bool = null;
var system_libs = std.ArrayList([]const u8).init(gpa);
defer system_libs.deinit();
@@ -296,6 +300,10 @@ pub fn buildOutputType(
if (arg_mode == .build) {
output_mode = arg_mode.build;
+ emit_h = switch (output_mode) {
+ .Exe => .no,
+ .Obj, .Lib => .yes_default_path,
+ };
const args = all_args[2..];
var i: usize = 0;
@@ -416,6 +424,18 @@ pub fn buildOutputType(
want_pic = true;
} else if (mem.eql(u8, arg, "-fno-PIC")) {
want_pic = false;
+ } else if (mem.eql(u8, arg, "-fLLVM")) {
+ use_llvm = true;
+ } else if (mem.eql(u8, arg, "-fno-LLVM")) {
+ use_llvm = false;
+ } else if (mem.eql(u8, arg, "-fLLD")) {
+ use_lld = true;
+ } else if (mem.eql(u8, arg, "-fno-LLD")) {
+ use_lld = false;
+ } else if (mem.eql(u8, arg, "-fClang")) {
+ use_clang = true;
+ } else if (mem.eql(u8, arg, "-fno-Clang")) {
+ use_clang = false;
} else if (mem.eql(u8, arg, "-rdynamic")) {
rdynamic = true;
} else if (mem.eql(u8, arg, "-femit-bin")) {
@@ -430,6 +450,12 @@ pub fn buildOutputType(
emit_zir = .{ .yes = arg["-femit-zir=".len..] };
} else if (mem.eql(u8, arg, "-fno-emit-zir")) {
emit_zir = .no;
+ } else if (mem.eql(u8, arg, "-femit-h")) {
+ emit_h = .yes_default_path;
+ } else if (mem.startsWith(u8, arg, "-femit-h=")) {
+ emit_h = .{ .yes = arg["-femit-h=".len..] };
+ } else if (mem.eql(u8, arg, "-fno-emit-h")) {
+ emit_h = .no;
} else if (mem.eql(u8, arg, "-dynamic")) {
link_mode = .Dynamic;
} else if (mem.eql(u8, arg, "-static")) {
@@ -491,7 +517,7 @@ pub fn buildOutputType(
}
}
} else {
- emit_h = false;
+ emit_h = .no;
strip = true;
ensure_libc_on_non_freestanding = true;
ensure_libcpp_on_non_freestanding = arg_mode == .cpp;
@@ -874,14 +900,6 @@ pub fn buildOutputType(
}
}
- if (system_libs.items.len != 0) {
- fatal("linking against system libraries not yet supported", .{});
- }
-
- const src_path = root_src_file orelse {
- fatal("expected at least one file argument", .{});
- };
-
const object_format: ?std.Target.ObjectFormat = blk: {
const ofmt = target_ofmt orelse break :blk null;
if (mem.eql(u8, ofmt, "elf")) {
@@ -909,11 +927,14 @@ pub fn buildOutputType(
.no => {
fatal("-fno-emit-bin not supported yet", .{});
},
- .yes_default_path => if (object_format != null and object_format.? == .c)
- try std.fmt.allocPrint(arena, "{}.c", .{root_name})
- else
- try std.zig.binNameAlloc(arena, root_name, target_info.target, output_mode, link_mode),
-
+ .yes_default_path => try std.zig.binNameAlloc(
+ arena,
+ root_name,
+ target_info.target,
+ output_mode,
+ link_mode,
+ object_format,
+ ),
.yes => |p| p,
};
@@ -930,10 +951,25 @@ pub fn buildOutputType(
.yes => |p| p,
};
- const root_pkg = try Package.create(gpa, fs.cwd(), ".", src_path);
- defer root_pkg.destroy();
+ const root_pkg = if (root_src_file) |src_path| try Package.create(gpa, fs.cwd(), ".", src_path) else null;
+ defer if (root_pkg) |pkg| pkg.destroy();
- var module = try Module.init(gpa, .{
+ const emit_h_path: ?[]const u8 = switch (emit_h) {
+ .yes => |p| p,
+ .no => null,
+ .yes_default_path => try std.fmt.allocPrint(arena, "{}.h", .{root_name}),
+ };
+
+ // TODO Remove this, we'll have this error emitted lazily only if the features would end
+ // up actually getting used.
+ //if (!build_options.have_llvm) {
+ // if ((use_llvm orelse false) or (use_lld orelse false) or (use_clang orelse false))
+ // fatal("-fLLVM, -fLLD, and -fClang unavailable: compiler not built with LLVM extensions enabled", .{});
+ //}
+
+ const compiler_id = try introspect.resolveCompilerId(gpa);
+
+ var module = Module.init(gpa, .{
.root_name = root_name,
.target = target_info.target,
.output_mode = output_mode,
@@ -944,7 +980,39 @@ pub fn buildOutputType(
.object_format = object_format,
.optimize_mode = build_mode,
.keep_source_files_loaded = zir_out_path != null,
- });
+ .clang_argv = clang_argv.items,
+ .lib_dirs = lib_dirs.items,
+ .rpath_list = rpath_list.items,
+ .c_source_files = c_source_files.items,
+ .link_objects = link_objects.items,
+ .framework_dirs = framework_dirs.items,
+ .frameworks = frameworks.items,
+ .system_libs = system_libs.items,
+ .emit_h = emit_h_path,
+ .have_libc = have_libc,
+ .have_libcpp = have_libcpp,
+ .want_pic = want_pic,
+ .want_sanitize_c = want_sanitize_c,
+ .use_llvm = use_llvm,
+ .use_lld = use_lld,
+ .use_clang = use_clang,
+ .rdynamic = rdynamic,
+ .linker_script = linker_script,
+ .version_script = version_script,
+ .disable_c_depfile = disable_c_depfile,
+ .override_soname = override_soname,
+ .linker_optimization = linker_optimization,
+ .linker_gc_sections = linker_gc_sections,
+ .linker_allow_shlib_undefined = linker_allow_shlib_undefined,
+ .linker_bind_global_refs_locally = linker_bind_global_refs_locally,
+ .linker_z_nodelete = linker_z_nodelete,
+ .linker_z_defs = linker_z_defs,
+ .stack_size_override = stack_size_override,
+ .compiler_id = compiler_id,
+ .strip = strip,
+ }) catch |err| {
+ fatal("unable to initialize module: {}", .{@errorName(err)});
+ };
defer module.deinit();
const stdin = std.io.getStdIn().inStream();
src-self-hosted/Module.zig
@@ -22,11 +22,12 @@ const trace = @import("tracy.zig").trace;
const liveness = @import("liveness.zig");
const astgen = @import("astgen.zig");
const zir_sema = @import("zir_sema.zig");
+const build_options = @import("build_options");
/// General-purpose allocator. Used for both temporary and long-term storage.
gpa: *Allocator,
-/// Pointer to externally managed resource.
-root_pkg: *Package,
+/// Pointer to externally managed resource. `null` if there is no zig file being compiled.
+root_pkg: ?*Package,
/// Module owns this resource.
/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
root_scope: *Scope,
@@ -48,22 +49,26 @@ export_owners: std.AutoArrayHashMapUnmanaged(*Decl, []*Export) = .{},
/// Maps fully qualified namespaced names to the Decl struct for them.
decl_table: std.ArrayHashMapUnmanaged(Scope.NameHash, *Decl, Scope.name_hash_hash, Scope.name_hash_eql, false) = .{},
+c_object_table: std.AutoArrayHashMapUnmanaged(*CObject, void) = .{},
+
link_error_flags: link.File.ErrorFlags = .{},
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
/// We optimize memory usage for a compilation with no compile errors by storing the
/// error messages and mapping outside of `Decl`.
-/// The ErrorMsg memory is owned by the decl, using Module's allocator.
+/// The ErrorMsg memory is owned by the decl, using Module's general purpose allocator.
/// Note that a Decl can succeed but the Fn it represents can fail. In this case,
/// a Decl can have a failed_decls entry but have analysis status of success.
failed_decls: std.AutoArrayHashMapUnmanaged(*Decl, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
-/// The ErrorMsg memory is owned by the `Scope`, using Module's allocator.
+/// The ErrorMsg memory is owned by the `Scope`, using Module's general purpose allocator.
failed_files: std.AutoArrayHashMapUnmanaged(*Scope, *ErrorMsg) = .{},
/// Using a map here for consistency with the other fields here.
-/// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
+/// The ErrorMsg memory is owned by the `Export`, using Module's general purpose allocator.
failed_exports: std.AutoArrayHashMapUnmanaged(*Export, *ErrorMsg) = .{},
+/// The ErrorMsg memory is owned by the `CObject`, using Module's general purpose allocator.
+failed_c_objects: std.AutoArrayHashMapUnmanaged(*CObject, *ErrorMsg) = .{},
/// Incrementing integer used to compare against the corresponding Decl
/// field to determine whether a Decl's status applies to an ongoing update, or a
@@ -79,10 +84,15 @@ deletion_set: std.ArrayListUnmanaged(*Decl) = .{},
/// Owned by Module.
root_name: []u8,
keep_source_files_loaded: bool,
+use_clang: bool,
/// Error tags and their values, tag names are duped with mod.gpa.
global_error_set: std.StringHashMapUnmanaged(u16) = .{},
+c_source_files: []const []const u8,
+clang_argv: []const []const u8,
+cache: std.cache_hash.CacheHash,
+
pub const InnerError = error{ OutOfMemory, AnalysisFail };
const WorkItem = union(enum) {
@@ -95,6 +105,9 @@ const WorkItem = union(enum) {
/// The source file containing the Decl has been updated, and so the
/// Decl may need its line number information updated in the debug info.
update_line_number: *Decl,
+ /// Invoke the Clang compiler to create an object file, which gets linked
+ /// with the Module.
+ c_object: *CObject,
};
pub const Export = struct {
@@ -230,6 +243,7 @@ pub const Decl = struct {
const src_decl = module.decls[self.src_index];
return src_decl.inst.src;
},
+ .none => unreachable,
.file, .block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@@ -282,6 +296,30 @@ pub const Decl = struct {
}
};
+pub const CObject = struct {
+ /// Relative to cwd. Owned by arena.
+ src_path: []const u8,
+ /// Owned by arena.
+ extra_flags: []const []const u8,
+ arena: std.heap.ArenaAllocator.State,
+ status: union(enum) {
+ new,
+ /// This is the output object path. Owned by gpa.
+ success: []u8,
+ /// There will be a corresponding ErrorMsg in Module.failed_c_objects.
+ /// This is the C source file contents (used for printing error messages). Owned by gpa.
+ failure: []u8,
+ },
+
+ pub fn destroy(self: *CObject, gpa: *Allocator) void {
+ switch (self.status) {
+ .new => {},
+ .failure, .success => |data| gpa.free(data),
+ }
+ self.arena.promote(gpa).deinit();
+ }
+};
+
/// Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.
pub const Fn = struct {
/// This memory owned by the Decl's TypedValue.Managed arena allocator.
@@ -361,6 +399,7 @@ pub const Scope = struct {
.zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
.file => unreachable,
.container => unreachable,
+ .none => unreachable,
}
}
@@ -376,6 +415,7 @@ pub const Scope = struct {
.zir_module => null,
.file => null,
.container => null,
+ .none => unreachable,
};
}
@@ -390,6 +430,7 @@ pub const Scope = struct {
.decl => return self.cast(DeclAnalysis).?.decl.scope,
.file => return &self.cast(File).?.root_container.base,
.zir_module, .container => return self,
+ .none => unreachable,
}
}
@@ -406,6 +447,7 @@ pub const Scope = struct {
.file => unreachable,
.zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
.container => return self.cast(Container).?.fullyQualifiedNameHash(name),
+ .none => unreachable,
}
}
@@ -414,6 +456,7 @@ pub const Scope = struct {
switch (self.tag) {
.file => return self.cast(File).?.contents.tree,
.zir_module => unreachable,
+ .none => unreachable,
.decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree,
@@ -434,6 +477,7 @@ pub const Scope = struct {
.zir_module => unreachable,
.file => unreachable,
.container => unreachable,
+ .none => unreachable,
};
}
@@ -444,6 +488,7 @@ pub const Scope = struct {
.container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path,
.file => return @fieldParentPtr(File, "base", base).sub_file_path,
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
+ .none => unreachable,
.block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@@ -456,6 +501,7 @@ pub const Scope = struct {
switch (base.tag) {
.file => return @fieldParentPtr(File, "base", base).unload(gpa),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).unload(gpa),
+ .none => {},
.block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@@ -470,6 +516,7 @@ pub const Scope = struct {
.container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module),
.file => return @fieldParentPtr(File, "base", base).getSource(module),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
+ .none => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
.local_ptr => unreachable,
@@ -483,6 +530,7 @@ pub const Scope = struct {
switch (base.tag) {
.container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
+ .none => unreachable,
.file => unreachable,
.block => unreachable,
.gen_zir => unreachable,
@@ -505,6 +553,10 @@ pub const Scope = struct {
scope_zir_module.deinit(gpa);
gpa.destroy(scope_zir_module);
},
+ .none => {
+ const scope_none = @fieldParentPtr(None, "base", base);
+ gpa.destroy(scope_none);
+ },
.block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@@ -527,6 +579,8 @@ pub const Scope = struct {
zir_module,
/// .zig source code.
file,
+ /// There is no .zig or .zir source code being compiled in this Module.
+ none,
/// struct, enum or union, every .file contains one of these.
container,
block,
@@ -622,7 +676,7 @@ pub const Scope = struct {
pub fn getSource(self: *File, module: *Module) ![:0]const u8 {
switch (self.source) {
.unloaded => {
- const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
+ const source = try module.root_pkg.?.root_src_dir.readFileAllocOptions(
module.gpa,
self.sub_file_path,
std.math.maxInt(u32),
@@ -638,6 +692,12 @@ pub const Scope = struct {
}
};
+ /// For when there is no top level scope because there are no .zig files being compiled.
+ pub const None = struct {
+ pub const base_tag: Tag = .none;
+ base: Scope = Scope{ .tag = base_tag },
+ };
+
pub const ZIRModule = struct {
pub const base_tag: Tag = .zir_module;
base: Scope = Scope{ .tag = base_tag },
@@ -720,7 +780,7 @@ pub const Scope = struct {
pub fn getSource(self: *ZIRModule, module: *Module) ![:0]const u8 {
switch (self.source) {
.unloaded => {
- const source = try module.root_pkg.root_src_dir.readFileAllocOptions(
+ const source = try module.root_pkg.?.root_src_dir.readFileAllocOptions(
module.gpa,
self.sub_file_path,
std.math.maxInt(u32),
@@ -855,20 +915,81 @@ pub const AllErrors = struct {
pub const InitOptions = struct {
target: std.Target,
root_name: []const u8,
- root_pkg: *Package,
+ root_pkg: ?*Package,
output_mode: std.builtin.OutputMode,
bin_file_dir: ?std.fs.Dir = null,
bin_file_path: []const u8,
+ emit_h: ?[]const u8 = null,
link_mode: ?std.builtin.LinkMode = null,
object_format: ?std.builtin.ObjectFormat = null,
optimize_mode: std.builtin.Mode = .Debug,
keep_source_files_loaded: bool = false,
+ clang_argv: []const []const u8 = &[0][]const u8{},
+ lib_dirs: []const []const u8 = &[0][]const u8{},
+ rpath_list: []const []const u8 = &[0][]const u8{},
+ c_source_files: []const []const u8 = &[0][]const u8{},
+ link_objects: []const []const u8 = &[0][]const u8{},
+ framework_dirs: []const []const u8 = &[0][]const u8{},
+ frameworks: []const []const u8 = &[0][]const u8{},
+ system_libs: []const []const u8 = &[0][]const u8{},
+ have_libc: bool = false,
+ have_libcpp: bool = false,
+ want_pic: ?bool = null,
+ want_sanitize_c: ?bool = null,
+ use_llvm: ?bool = null,
+ use_lld: ?bool = null,
+ use_clang: ?bool = null,
+ rdynamic: bool = false,
+ strip: bool = false,
+ linker_script: ?[]const u8 = null,
+ version_script: ?[]const u8 = null,
+ disable_c_depfile: bool = false,
+ override_soname: ?[]const u8 = null,
+ linker_optimization: ?[]const u8 = null,
+ linker_gc_sections: ?bool = null,
+ linker_allow_shlib_undefined: ?bool = null,
+ linker_bind_global_refs_locally: ?bool = null,
+ linker_z_nodelete: bool = false,
+ linker_z_defs: bool = false,
+ stack_size_override: u64 = 0,
+ compiler_id: [16]u8,
};
pub fn init(gpa: *Allocator, options: InitOptions) !Module {
const root_name = try gpa.dupe(u8, options.root_name);
errdefer gpa.free(root_name);
+ const ofmt = options.object_format orelse options.target.getObjectFormat();
+
+ // Make a decision on whether to use LLD or our own linker.
+ const use_lld = if (options.use_lld) |explicit| explicit else blk: {
+ if (!build_options.have_llvm)
+ break :blk false;
+
+ if (ofmt == .c)
+ break :blk false;
+
+ // Our linker can't handle objects or most advanced options yet.
+ if (options.link_objects.len != 0 or
+ options.c_source_files.len != 0 or
+ options.frameworks.len != 0 or
+ options.system_libs.len != 0 or
+ options.have_libc or options.have_libcpp or
+ options.linker_script != null or options.version_script != null)
+ {
+ break :blk true;
+ }
+ break :blk false;
+ };
+
+ // Make a decision on whether to use LLVM or our own backend.
+ const use_llvm = if (options.use_llvm) |explicit| explicit else blk: {
+ // We would prefer LLVM for release builds when it is available; however,
+ // we don't have an LLVM backend yet :)
+ // We would also prefer LLVM for architectures that lack self-hosted backend support.
+ break :blk false;
+ };
+
const bin_file_dir = options.bin_file_dir orelse std.fs.cwd();
const bin_file = try link.File.openPath(gpa, bin_file_dir, options.bin_file_path, .{
.root_name = root_name,
@@ -876,39 +997,130 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
.target = options.target,
.output_mode = options.output_mode,
.link_mode = options.link_mode orelse .Static,
- .object_format = options.object_format orelse options.target.getObjectFormat(),
+ .object_format = ofmt,
.optimize_mode = options.optimize_mode,
+ .use_lld = use_lld,
+ .use_llvm = use_llvm,
+ .objects = options.link_objects,
+ .frameworks = options.frameworks,
+ .framework_dirs = options.framework_dirs,
+ .system_libs = options.system_libs,
+ .lib_dirs = options.lib_dirs,
+ .rpath_list = options.rpath_list,
+ .strip = options.strip,
});
errdefer bin_file.destroy();
const root_scope = blk: {
- if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) {
- const root_scope = try gpa.create(Scope.File);
- root_scope.* = .{
- .sub_file_path = options.root_pkg.root_src_path,
- .source = .{ .unloaded = {} },
- .contents = .{ .not_available = {} },
- .status = .never_loaded,
- .root_container = .{
- .file_scope = root_scope,
+ if (options.root_pkg) |root_pkg| {
+ if (mem.endsWith(u8, root_pkg.root_src_path, ".zig")) {
+ const root_scope = try gpa.create(Scope.File);
+ root_scope.* = .{
+ .sub_file_path = root_pkg.root_src_path,
+ .source = .{ .unloaded = {} },
+ .contents = .{ .not_available = {} },
+ .status = .never_loaded,
+ .root_container = .{
+ .file_scope = root_scope,
+ .decls = .{},
+ },
+ };
+ break :blk &root_scope.base;
+ } else if (mem.endsWith(u8, root_pkg.root_src_path, ".zir")) {
+ const root_scope = try gpa.create(Scope.ZIRModule);
+ root_scope.* = .{
+ .sub_file_path = root_pkg.root_src_path,
+ .source = .{ .unloaded = {} },
+ .contents = .{ .not_available = {} },
+ .status = .never_loaded,
.decls = .{},
- },
- };
- break :blk &root_scope.base;
- } else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
- const root_scope = try gpa.create(Scope.ZIRModule);
- root_scope.* = .{
- .sub_file_path = options.root_pkg.root_src_path,
- .source = .{ .unloaded = {} },
- .contents = .{ .not_available = {} },
- .status = .never_loaded,
- .decls = .{},
- };
- break :blk &root_scope.base;
+ };
+ break :blk &root_scope.base;
+ } else {
+ unreachable;
+ }
} else {
- unreachable;
+ const root_scope = try gpa.create(Scope.None);
+ root_scope.* = .{};
+ break :blk &root_scope.base;
+ }
+ };
+
+ // We put everything into the cache hash except for the root source file, because we want to
+ // find the same binary and incrementally update it even if the file contents changed.
+ const cache_dir = if (options.root_pkg) |root_pkg| root_pkg.root_src_dir else std.fs.cwd();
+ var cache = try std.cache_hash.CacheHash.init(gpa, cache_dir, "zig-cache");
+ errdefer cache.release();
+
+ // Now we prepare hash state initializations to avoid redundantly computing hashes later.
+ // First we add the inputs that are common to the Zig source and to every C source file.
+ cache.add(options.compiler_id);
+ cache.add(options.optimize_mode);
+ cache.add(options.target.cpu.arch);
+ cache.addBytes(options.target.cpu.model.name);
+ cache.add(options.target.cpu.features.ints);
+ cache.add(options.target.os.tag);
+ switch (options.target.os.tag) {
+ .linux => {
+ cache.add(options.target.os.version_range.linux.range.min);
+ cache.add(options.target.os.version_range.linux.range.max);
+ cache.add(options.target.os.version_range.linux.glibc);
+ },
+ .windows => {
+ cache.add(options.target.os.version_range.windows.min);
+ cache.add(options.target.os.version_range.windows.max);
+ },
+ .freebsd,
+ .macosx,
+ .ios,
+ .tvos,
+ .watchos,
+ .netbsd,
+ .openbsd,
+ .dragonfly,
+ => {
+ cache.add(options.target.os.version_range.semver.min);
+ cache.add(options.target.os.version_range.semver.max);
+ },
+ else => {},
+ }
+ cache.add(options.target.abi);
+ cache.add(ofmt);
+ // TODO PIC (see detect_pic from codegen.cpp)
+ cache.add(bin_file.options.link_mode);
+ cache.add(options.strip);
+
+ // Make a decision on whether to use Clang for translate-c and compiling C files.
+ const use_clang = if (options.use_clang) |explicit| explicit else blk: {
+ if (build_options.have_llvm) {
+ // Can't use it if we don't have it!
+ break :blk false;
}
+ // It's not planned to do our own translate-c or C compilation.
+ break :blk true;
};
+ var c_object_table = std.AutoArrayHashMapUnmanaged(*CObject, void){};
+ errdefer {
+ for (c_object_table.items()) |entry| entry.key.destroy(gpa);
+ c_object_table.deinit(gpa);
+ }
+ // Add a `CObject` for each entry in `c_source_files`.
+ try c_object_table.ensureCapacity(gpa, options.c_source_files.len);
+ for (options.c_source_files) |c_source_file| {
+ var local_arena = std.heap.ArenaAllocator.init(gpa);
+ errdefer local_arena.deinit();
+
+ const c_object = try local_arena.allocator.create(CObject);
+ const src_path = try local_arena.allocator.dupe(u8, c_source_file);
+
+ c_object.* = .{
+ .status = .{ .new = {} },
+ .src_path = src_path,
+ .extra_flags = &[0][]const u8{},
+ .arena = local_arena.state,
+ };
+ c_object_table.putAssumeCapacityNoClobber(c_object, {});
+ }
return Module{
.gpa = gpa,
@@ -920,6 +1132,11 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
.bin_file = bin_file,
.work_queue = std.fifo.LinearFifo(WorkItem, .Dynamic).init(gpa),
.keep_source_files_loaded = options.keep_source_files_loaded,
+ .use_clang = use_clang,
+ .clang_argv = options.clang_argv,
+ .c_source_files = options.c_source_files,
+ .cache = cache,
+ .c_object_table = c_object_table,
};
}
@@ -935,11 +1152,21 @@ pub fn deinit(self: *Module) void {
}
self.decl_table.deinit(gpa);
+ for (self.c_object_table.items()) |entry| {
+ entry.key.destroy(gpa);
+ }
+ self.c_object_table.deinit(gpa);
+
for (self.failed_decls.items()) |entry| {
entry.value.destroy(gpa);
}
self.failed_decls.deinit(gpa);
+ for (self.failed_c_objects.items()) |entry| {
+ entry.value.destroy(gpa);
+ }
+ self.failed_c_objects.deinit(gpa);
+
for (self.failed_files.items()) |entry| {
entry.value.destroy(gpa);
}
@@ -969,6 +1196,7 @@ pub fn deinit(self: *Module) void {
gpa.free(entry.key);
}
self.global_error_set.deinit(gpa);
+ self.cache.release();
self.* = undefined;
}
@@ -995,7 +1223,15 @@ pub fn update(self: *Module) !void {
self.generation += 1;
- // TODO Use the cache hash file system to detect which source files changed.
+ // For compiling C objects, we rely on the cache hash system to avoid duplicating work.
+ // TODO Look into caching this data in memory to improve performance.
+ // Add a WorkItem for each C object.
+ try self.work_queue.ensureUnusedCapacity(self.c_object_table.items().len);
+ for (self.c_object_table.items()) |entry| {
+ self.work_queue.writeItemAssumeCapacity(.{ .c_object = entry.key });
+ }
+
+ // TODO Detect which source files changed.
// Until then we simulate a full cache miss. Source files could have been loaded for any reason;
// to force a refresh we unload now.
if (self.root_scope.cast(Scope.File)) |zig_file| {
@@ -1053,6 +1289,7 @@ pub fn makeBinFileWritable(self: *Module) !void {
pub fn totalErrorCount(self: *Module) usize {
const total = self.failed_decls.items().len +
+ self.failed_c_objects.items().len +
self.failed_files.items().len +
self.failed_exports.items().len;
return if (total == 0) @boolToInt(self.link_error_flags.no_entry_point_found) else total;
@@ -1065,6 +1302,12 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
var errors = std.ArrayList(AllErrors.Message).init(self.gpa);
defer errors.deinit();
+ for (self.failed_c_objects.items()) |entry| {
+ const c_object = entry.key;
+ const err_msg = entry.value;
+ const source = c_object.status.failure;
+ try AllErrors.add(&arena, &errors, c_object.src_path, source, err_msg.*);
+ }
for (self.failed_files.items()) |entry| {
const scope = entry.key;
const err_msg = entry.value;
@@ -1085,8 +1328,14 @@ pub fn getAllErrorsAlloc(self: *Module) !AllErrors {
}
if (errors.items.len == 0 and self.link_error_flags.no_entry_point_found) {
+ const global_err_src_path = blk: {
+ if (self.root_pkg) |root_pkg| break :blk root_pkg.root_src_path;
+ if (self.c_source_files.len != 0) break :blk self.c_source_files[0];
+ if (self.bin_file.options.objects.len != 0) break :blk self.bin_file.options.objects[0];
+ break :blk "(no file)";
+ };
try errors.append(.{
- .src_path = self.root_pkg.root_src_path,
+ .src_path = global_err_src_path,
.line = 0,
.column = 0,
.byte_offset = 0,
@@ -1175,6 +1424,41 @@ pub fn performAllTheWork(self: *Module) error{OutOfMemory}!void {
decl.analysis = .codegen_failure_retryable;
};
},
+ .c_object => |c_object| {
+ // Free the previous attempt.
+ switch (c_object.status) {
+ .new => {},
+ .success => |o_file_path| {
+ self.gpa.free(o_file_path);
+ c_object.status = .{ .new = {} };
+ },
+ .failure => |source| {
+ self.failed_c_objects.removeAssertDiscard(c_object);
+ self.gpa.free(source);
+
+ c_object.status = .{ .new = {} };
+ },
+ }
+ if (!build_options.have_llvm) {
+ try self.failed_c_objects.ensureCapacity(self.gpa, self.failed_c_objects.items().len + 1);
+ self.failed_c_objects.putAssumeCapacityNoClobber(c_object, try ErrorMsg.create(
+ self.gpa,
+ 0,
+ "clang not available: compiler not built with LLVM extensions enabled",
+ .{},
+ ));
+ c_object.status = .{ .failure = "" };
+ continue;
+ }
+ try self.failed_c_objects.ensureCapacity(self.gpa, self.failed_c_objects.items().len + 1);
+ self.failed_c_objects.putAssumeCapacityNoClobber(c_object, try ErrorMsg.create(
+ self.gpa,
+ 0,
+ "TODO: implement invoking clang to compile C source files",
+ .{},
+ ));
+ c_object.status = .{ .failure = "" };
+ },
};
}
@@ -3161,6 +3445,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
zir_module.status = .loaded_sema_failure;
self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
},
+ .none => unreachable,
.file => unreachable,
.container => unreachable,
}
src-self-hosted/test.zig
@@ -9,6 +9,7 @@ const enable_qemu: bool = build_options.enable_qemu;
const enable_wine: bool = build_options.enable_wine;
const enable_wasmtime: bool = build_options.enable_wasmtime;
const glibc_multi_install_dir: ?[]const u8 = build_options.glibc_multi_install_dir;
+const introspect = @import("introspect.zig");
const cheader = @embedFile("link/cbe.h");
@@ -435,7 +436,10 @@ pub const TestContext = struct {
const root_pkg = try Package.create(allocator, tmp.dir, ".", tmp_src_path);
defer root_pkg.destroy();
- const bin_name = try std.zig.binNameAlloc(arena, "test_case", target, case.output_mode, null);
+ const ofmt: ?std.builtin.ObjectFormat = if (case.cbe) .c else null;
+ const bin_name = try std.zig.binNameAlloc(arena, "test_case", target, case.output_mode, null, ofmt);
+
+ const compiler_id = try introspect.resolveCompilerId(arena);
var module = try Module.init(allocator, .{
.root_name = "test_case",
@@ -450,7 +454,8 @@ pub const TestContext = struct {
.bin_file_path = bin_name,
.root_pkg = root_pkg,
.keep_source_files_loaded = true,
- .object_format = if (case.cbe) .c else null,
+ .object_format = ofmt,
+ .compiler_id = compiler_id,
});
defer module.deinit();
@@ -693,23 +698,23 @@ pub const TestContext = struct {
}
var interpreter = spu.Interpreter(struct {
- RAM: [0x10000]u8 = undefined,
+ RAM: [0x10000]u8 = undefined,
- pub fn read8(bus: @This(), addr: u16) u8 {
- return bus.RAM[addr];
- }
- pub fn read16(bus: @This(), addr: u16) u16 {
- return std.mem.readIntLittle(u16, bus.RAM[addr..][0..2]);
- }
+ pub fn read8(bus: @This(), addr: u16) u8 {
+ return bus.RAM[addr];
+ }
+ pub fn read16(bus: @This(), addr: u16) u16 {
+ return std.mem.readIntLittle(u16, bus.RAM[addr..][0..2]);
+ }
- pub fn write8(bus: *@This(), addr: u16, val: u8) void {
- bus.RAM[addr] = val;
- }
+ pub fn write8(bus: *@This(), addr: u16, val: u8) void {
+ bus.RAM[addr] = val;
+ }
- pub fn write16(bus: *@This(), addr: u16, val: u16) void {
- std.mem.writeIntLittle(u16, bus.RAM[addr..][0..2], val);
- }
- }){
+ pub fn write16(bus: *@This(), addr: u16, val: u16) void {
+ std.mem.writeIntLittle(u16, bus.RAM[addr..][0..2], val);
+ }
+ }){
.bus = .{},
};
build.zig
@@ -139,6 +139,7 @@ pub fn build(b: *Builder) !void {
const is_wasmtime_enabled = b.option(bool, "enable-wasmtime", "Use Wasmtime to enable and run WASI libstd tests") orelse false;
const glibc_multi_dir = b.option([]const u8, "enable-foreign-glibc", "Provide directory with glibc installations to run cross compiled tests that link glibc");
+ test_stage2.addBuildOption(bool, "have_llvm", enable_llvm);
test_stage2.addBuildOption(bool, "enable_qemu", is_qemu_enabled);
test_stage2.addBuildOption(bool, "enable_wine", is_wine_enabled);
test_stage2.addBuildOption(bool, "enable_wasmtime", is_wasmtime_enabled);