Commit 619159cf48
Changed files (9)
lib
lib/std/fs/file.zig
@@ -527,6 +527,30 @@ pub const File = struct {
}
}
+ /// Copies up to `len` bytes (bounded by an internal 32 KiB buffer) from `in`
+ /// at `in_offset` to `out` at `out_offset`. Returns the number of bytes
+ /// written; 0 means `in` was already at end-of-file.
+ pub fn copyRange(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) (PReadError || PWriteError)!usize {
+ // TODO take advantage of copy_file_range OS APIs
+ var buf: [8 * 4096]u8 = undefined;
+ const adjusted_count = math.min(buf.len, len);
+ // `pread` fails with PReadError, so the declared error set must include it
+ // in addition to PWriteError from `pwrite`.
+ const amt_read = try in.pread(buf[0..adjusted_count], in_offset);
+ if (amt_read == 0) return 0;
+ return out.pwrite(buf[0..amt_read], out_offset);
+ }
+
+ /// Returns the number of bytes copied. If the number returned is smaller
+ /// than `len`, it means the in file reached the end. Reaching the end of a
+ /// file is not an error condition.
+ pub fn copyRangeAll(in: File, in_offset: u64, out: File, out_offset: u64, len: usize) (PReadError || PWriteError)!usize {
+ // Must be explicitly typed: an inferred `0` would be a comptime_int, which
+ // cannot be a mutable var.
+ var total_bytes_copied: usize = 0;
+ var in_off = in_offset;
+ var out_off = out_offset;
+ while (total_bytes_copied < len) {
+ const amt_copied = try copyRange(in, in_off, out, out_off, len - total_bytes_copied);
+ if (amt_copied == 0) return total_bytes_copied;
+ total_bytes_copied += amt_copied;
+ in_off += amt_copied;
+ out_off += amt_copied;
+ }
+ // Loop exit means all `len` bytes were copied; every path must return.
+ return total_bytes_copied;
+ }
+
pub const WriteFileOptions = struct {
in_offset: u64 = 0,
lib/std/array_list.zig
@@ -149,10 +149,15 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
pub fn appendSlice(self: *Self, items: SliceConst) !void {
+ // Reserve first so the copy below cannot fail; growing may invalidate
+ // pointers into `self.items`.
+ try self.ensureCapacity(self.items.len + items.len);
+ self.appendSliceAssumeCapacity(items);
+ }
+
+ /// Append the slice of items to the list, asserting the capacity is already
+ /// enough to store the new items.
+ pub fn appendSliceAssumeCapacity(self: *Self, items: SliceConst) void {
const oldlen = self.items.len;
const newlen = self.items.len + items.len;
+ // Enforce the documented contract: callers must have reserved capacity,
+ // otherwise the copy below writes past the allocation.
+ assert(newlen <= self.capacity);
-
- try self.ensureCapacity(newlen);
self.items.len = newlen;
mem.copy(T, self.items[oldlen..], items);
}
@@ -378,10 +383,16 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Append the slice of items to the list. Allocates more
/// memory as necessary.
pub fn appendSlice(self: *Self, allocator: *Allocator, items: SliceConst) !void {
+ // Reserve first so the copy below cannot fail; growing may invalidate
+ // pointers into `self.items`.
+ try self.ensureCapacity(allocator, self.items.len + items.len);
+ self.appendSliceAssumeCapacity(items);
+ }
+
+ /// Append the slice of items to the list, asserting the capacity is enough
+ /// to store the new items.
+ pub fn appendSliceAssumeCapacity(self: *Self, items: SliceConst) void {
const oldlen = self.items.len;
const newlen = self.items.len + items.len;
+ // Enforce the documented contract: callers must have reserved capacity,
+ // otherwise the copy below writes past the allocation.
+ assert(newlen <= self.capacity);
- try self.ensureCapacity(allocator, newlen);
self.items.len = newlen;
mem.copy(T, self.items[oldlen..], items);
}
lib/std/hash_map.zig
@@ -10,7 +10,7 @@ const Wyhash = std.hash.Wyhash;
const Allocator = mem.Allocator;
const builtin = @import("builtin");
-const want_modification_safety = builtin.mode != .ReleaseFast;
+const want_modification_safety = std.debug.runtime_safety;
const debug_u32 = if (want_modification_safety) u32 else void;
pub fn AutoHashMap(comptime K: type, comptime V: type) type {
@@ -219,6 +219,10 @@ pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u3
return put_result.old_kv;
}
+ /// Inserts `key`/`value`, asserting that capacity was reserved and that the
+ /// key was not already present.
+ pub fn putAssumeCapacityNoClobber(self: *Self, key: K, value: V) void {
+ // `assert` always evaluates its argument, so the put itself still runs in
+ // release modes; only the null check may be compiled out.
+ assert(self.putAssumeCapacity(key, value) == null);
+ }
+
pub fn get(hm: *const Self, key: K) ?*KV {
if (hm.entries.len == 0) {
return null;
src-self-hosted/ir/text.zig
@@ -406,8 +406,8 @@ pub const ErrorMsg = struct {
pub const Module = struct {
decls: []*Inst,
- errors: []ErrorMsg,
arena: std.heap.ArenaAllocator.State,
+ error_msg: ?ErrorMsg = null,
pub const Body = struct {
instructions: []*Inst,
@@ -415,7 +415,6 @@ pub const Module = struct {
pub fn deinit(self: *Module, allocator: *Allocator) void {
allocator.free(self.decls);
- allocator.free(self.errors);
self.arena.promote(allocator).deinit();
self.* = undefined;
}
@@ -576,22 +575,21 @@ pub fn parse(allocator: *Allocator, source: [:0]const u8) Allocator.Error!Module
.i = 0,
.source = source,
.global_name_map = &global_name_map,
- .errors = .{},
.decls = .{},
};
errdefer parser.arena.deinit();
parser.parseRoot() catch |err| switch (err) {
error.ParseFailure => {
- assert(parser.errors.items.len != 0);
+ assert(parser.error_msg != null);
},
else => |e| return e,
};
return Module{
.decls = parser.decls.toOwnedSlice(allocator),
- .errors = parser.errors.toOwnedSlice(allocator),
.arena = parser.arena.state,
+ .error_msg = parser.error_msg,
};
}
@@ -600,9 +598,9 @@ const Parser = struct {
arena: std.heap.ArenaAllocator,
i: usize,
source: [:0]const u8,
- errors: std.ArrayListUnmanaged(ErrorMsg),
decls: std.ArrayListUnmanaged(*Inst),
global_name_map: *std.StringHashMap(usize),
+ error_msg: ?ErrorMsg = null,
const Body = struct {
instructions: std.ArrayList(*Inst),
@@ -776,10 +774,9 @@ const Parser = struct {
+ /// Records a formatted parse error at the current byte offset and returns
+ /// error.ParseFailure. The message is allocated in the parser's arena, so it
+ /// lives as long as the resulting Module.
fn fail(self: *Parser, comptime format: []const u8, args: var) InnerError {
@setCold(true);
- const msg = try std.fmt.allocPrint(&self.arena.allocator, format, args);
- (try self.errors.addOne()).* = .{
+ self.error_msg = ErrorMsg{
.byte_offset = self.i,
- .msg = msg,
+ .msg = try std.fmt.allocPrint(&self.arena.allocator, format, args),
};
return error.ParseFailure;
}
@@ -971,7 +968,6 @@ pub fn emit_zir(allocator: *Allocator, old_module: ir.Module) !Module {
return Module{
.decls = ctx.decls.toOwnedSlice(),
.arena = ctx.arena,
- .errors = &[0]ErrorMsg{},
};
}
src-self-hosted/ir.zig
@@ -5,6 +5,7 @@ const ArrayListUnmanaged = std.ArrayListUnmanaged;
const LinkedList = std.TailQueue;
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
+const TypedValue = @import("TypedValue.zig");
const assert = std.debug.assert;
const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
@@ -167,11 +168,6 @@ pub const Inst = struct {
};
};
-pub const TypedValue = struct {
- ty: Type,
- val: Value,
-};
-
fn swapRemoveElem(allocator: *Allocator, comptime T: type, item: T, list: *ArrayListUnmanaged(T)) void {
var i: usize = 0;
while (i < list.items.len) {
@@ -192,46 +188,125 @@ pub const Module = struct {
root_scope: *Scope.ZIRModule,
/// Pointer to externally managed resource.
bin_file: *link.ElfFile,
- failed_decls: ArrayListUnmanaged(*Decl) = .{},
- failed_fns: ArrayListUnmanaged(*Fn) = .{},
- failed_files: ArrayListUnmanaged(*Scope.ZIRModule) = .{},
+ /// It's rare for a decl to be exported, so we save memory by having a sparse map of
+ /// Decl pointers to details about them being exported.
+ /// The Export memory is owned by the `export_owners` table; the slice itself is owned by this table.
+ decl_exports: std.AutoHashMap(*Decl, []*Export),
+ /// This models the Decls that perform exports, so that `decl_exports` can be updated when a Decl
+ /// is modified. Note that the key of this table is not the Decl being exported, but the Decl that
+ /// is performing the export of another Decl.
+ /// This table owns the Export memory.
+ export_owners: std.AutoHashMap(*Decl, []*Export),
+ /// Maps fully qualified namespaced names to the Decl struct for them.
decl_table: std.AutoHashMap(Decl.Hash, *Decl),
+
optimize_mode: std.builtin.Mode,
link_error_flags: link.ElfFile.ErrorFlags = .{},
+ /// We optimize memory usage for a compilation with no compile errors by storing the
+ /// error messages and mapping outside of `Decl`.
+ /// The ErrorMsg memory is owned by the decl, using Module's allocator.
+ failed_decls: std.AutoHashMap(*Decl, *ErrorMsg),
+ /// We optimize memory usage for a compilation with no compile errors by storing the
+ /// error messages and mapping outside of `Fn`.
+ /// The ErrorMsg memory is owned by the `Fn`, using Module's allocator.
+ failed_fns: std.AutoHashMap(*Fn, *ErrorMsg),
+ /// Using a map here for consistency with the other fields here.
+ /// The ErrorMsg memory is owned by the `Scope.ZIRModule`, using Module's allocator.
+ failed_files: std.AutoHashMap(*Scope.ZIRModule, *ErrorMsg),
+ /// Using a map here for consistency with the other fields here.
+ /// The ErrorMsg memory is owned by the `Export`, using Module's allocator.
+ failed_exports: std.AutoHashMap(*Export, *ErrorMsg),
+
+ pub const Export = struct {
+ options: std.builtin.ExportOptions,
+ /// Byte offset into the file that contains the export directive.
+ src: usize,
+ /// Represents the position of the export, if any, in the output file.
+ link: link.ElfFile.Export,
+ /// The Decl that performs the export. Note that this is *not* the Decl being exported.
+ owner_decl: *Decl,
+ status: enum { in_progress, failed, complete },
+ };
+
pub const Decl = struct {
- /// Contains the memory for `typed_value` and this `Decl` itself.
- /// If the Decl is a function, also contains that memory.
- /// If the decl has any export nodes, also contains that memory.
- /// TODO look into using a more memory efficient arena that will cost less bytes per decl.
- /// This one has a minimum allocation of 4096 bytes.
- arena: std.heap.ArenaAllocator.State,
/// This name is relative to the containing namespace of the decl. It uses a null-termination
/// to save bytes, since there can be a lot of decls in a compilation. The null byte is not allowed
/// in symbol names, because executable file formats use null-terminated strings for symbol names.
+ /// All Decls have names, even values that are not bound to a zig namespace. This is necessary for
+ /// mapping them to an address in the output file.
+ /// Memory owned by this decl, using Module's allocator.
name: [*:0]const u8,
- /// It's rare for a decl to be exported, and it's even rarer for a decl to be mapped to more
- /// than one export, so we use a linked list to save memory.
- export_node: ?*LinkedList(std.builtin.ExportOptions).Node = null,
+ /// The direct parent container of the Decl. This field will need to get more fleshed out when
+ /// self-hosted supports proper struct types and Zig AST => ZIR.
+ /// Reference to externally owned memory.
+ scope: *Scope.ZIRModule,
/// Byte offset into the source file that contains this declaration.
/// This is the base offset that src offsets within this Decl are relative to.
src: usize,
+ /// The most recent value of the Decl after a successful semantic analysis.
+ /// The tag for this union is determined by the tag value of the analysis field.
+ typed_value: union {
+ never_succeeded,
+ most_recent: TypedValue.Managed,
+ },
/// Represents the "shallow" analysis status. For example, for decls that are functions,
/// the function type is analyzed with this set to `in_progress`, however, the semantic
/// analysis of the function body is performed with this value set to `success`. Functions
/// have their own analysis status field.
- analysis: union(enum) {
- in_progress,
- failure: ErrorMsg,
- success: TypedValue,
+ analysis: enum {
+ initial_in_progress,
+ /// This Decl might be OK but it depends on another one which did not successfully complete
+ /// semantic analysis. This Decl never had a value computed.
+ initial_dependency_failure,
+ /// Semantic analysis failure. This Decl never had a value computed.
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ initial_sema_failure,
+ /// In this case the `typed_value.most_recent` can still be accessed.
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ codegen_failure,
+ /// This Decl might be OK but it depends on another one which did not successfully complete
+ /// semantic analysis. There is a most recent value available.
+ repeat_dependency_failure,
+ /// Semantic anlaysis failure, but the `typed_value.most_recent` can be accessed.
+ /// There will be a corresponding ErrorMsg in Module.failed_decls.
+ repeat_sema_failure,
+ /// Completed successfully before; the `typed_value.most_recent` can be accessed, and
+ /// new semantic analysis is in progress.
+ repeat_in_progress,
+ /// Everything is done and updated.
+ complete,
},
- /// The direct container of the Decl. This field will need to get more fleshed out when
- /// self-hosted supports proper struct types and Zig AST => ZIR.
- scope: *Scope.ZIRModule,
+
+ /// Represents the position of the code, if any, in the output file.
+ /// This is populated regardless of semantic analysis and code generation.
+ /// This value is `undefined` if the type has no runtime bits.
+ link: link.ElfFile.Decl,
+
+ /// The set of other decls whose typed_value could possibly change if this Decl's
+ /// typed_value is modified.
+ /// TODO look into using a lightweight map/set data structure rather than a linear array.
+ dependants: ArrayListUnmanaged(*Decl) = .{},
+
+ /// Returns the most recent successfully-computed value, or null if analysis
+ /// never produced one.
+ pub fn typedValue(self: Decl) ?TypedValue {
+ switch (self.analysis) {
+ .initial_in_progress,
+ .initial_dependency_failure,
+ .initial_sema_failure,
+ => return null,
+ .codegen_failure,
+ .repeat_dependency_failure,
+ .repeat_sema_failure,
+ .repeat_in_progress,
+ .complete,
+ // `most_recent` is a TypedValue.Managed; unwrap its `typed_value`
+ // field to match the declared `?TypedValue` return type (see the use
+ // `decl.typed_value.most_recent.typed_value` in analyzeExport).
+ => return self.typed_value.most_recent.typed_value,
+ }
+ }
pub fn destroy(self: *Decl, allocator: *Allocator) void {
- var arena = self.arena.promote(allocator);
- arena.deinit();
+ // `mem.spanZ` takes only the pointer; the element type is inferred.
+ allocator.free(mem.spanZ(self.name));
+ // The owned memory sits in the Managed wrapper (`typed_value.most_recent`),
+ // not the bare TypedValue returned by typedValue(), so deinit the wrapper.
+ if (self.typedValue()) |_| self.typed_value.most_recent.deinit(allocator);
+ allocator.destroy(self);
}
pub const Hash = [16]u8;
@@ -252,8 +327,10 @@ pub const Module = struct {
pub const Fn = struct {
fn_type: Type,
analysis: union(enum) {
+ queued,
in_progress: *Analysis,
- failure: ErrorMsg,
+ /// There will be a corresponding ErrorMsg in Module.failed_fns
+ failure,
success: Body,
},
/// The direct container of the Fn. This field will need to get more fleshed out when
@@ -290,68 +367,36 @@ pub const Module = struct {
/// Relative to the owning package's root_src_dir.
/// Reference to external memory, not owned by ZIRModule.
sub_file_path: []const u8,
- contents: union(enum) {
+ source: union {
unloaded,
- parse_failure: ParseFailure,
- success: Contents,
+ bytes: [:0]const u8,
},
- pub const ParseFailure = struct {
- source: [:0]const u8,
- errors: []ErrorMsg,
-
- pub fn deinit(self: *ParseFailure, allocator: *Allocator) void {
- allocator.free(self.errors);
- allocator.free(source);
- }
- };
- pub const Contents = struct {
- source: [:0]const u8,
+ contents: union {
+ not_available,
module: *text.Module,
- };
+ },
+ status: enum {
+ unloaded,
+ unloaded_parse_failure,
+ loaded_parse_failure,
+ loaded_success,
+ },
pub fn deinit(self: *ZIRModule, allocator: *Allocator) void {
- switch (self.contents) {
- .unloaded => {},
- .parse_failure => |pf| pd.deinit(allocator),
- .success => |contents| {
+ switch (self.status) {
+ .unloaded,
+ .unloaded_parse_failure,
+ => {},
+ .loaded_success => {
+ // The source bytes live in the `source` union, not `contents`
+ // (whose only loaded payload is `module`).
+ allocator.free(self.source.bytes);
+ self.contents.module.deinit(allocator);
+ // NOTE(review): the module pointer is allocator.create'd when
+ // loading; confirm whether it also needs allocator.destroy here
+ // or is reclaimed elsewhere — as written it looks leaked.
+ },
+ .loaded_parse_failure => {
+ allocator.free(self.source.bytes);
+ },
}
self.* = undefined;
}
-
- pub fn loadContents(self: *ZIRModule, allocator: *Allocator) !*Contents {
- if (self.contents) |contents| return contents;
-
- const max_size = std.math.maxInt(u32);
- const source = try self.root_pkg_dir.readFileAllocOptions(allocator, self.root_src_path, max_size, 1, 0);
- errdefer allocator.free(source);
-
- var errors = std.ArrayList(ErrorMsg).init(allocator);
- defer errors.deinit();
-
- var src_zir_module = try text.parse(allocator, source, &errors);
- errdefer src_zir_module.deinit(allocator);
-
- switch (self.contents) {
- .parse_failure => |pf| pf.deinit(allocator),
- .unloaded => {},
- .success => unreachable,
- }
-
- if (errors.items.len != 0) {
- self.contents = .{ .parse_failure = errors.toOwnedSlice() };
- return error.ParseFailure;
- }
- self.contents = .{
- .success = .{
- .source = source,
- .module = src_zir_module,
- },
- };
- return &self.contents.success;
- }
};
/// This is a temporary structure, references to it are valid only
@@ -436,7 +481,7 @@ pub const Module = struct {
// Analyze the root source file now.
self.analyzeRoot(self.root_scope) catch |err| switch (err) {
error.AnalysisFail => {
- assert(self.totalErrorCount() != 0);
+ assert(self.failed_files.size != 0);
},
else => |e| return e,
};
@@ -446,9 +491,10 @@ pub const Module = struct {
}
+ /// Total number of compile errors across all failure maps plus link errors.
pub fn totalErrorCount(self: *Module) usize {
- return self.failed_decls.items.len +
- self.failed_fns.items.len +
- self.failed_decls.items.len +
+ // One error per map entry. Count `failed_files` here rather than counting
+ // `failed_decls` twice (a copy-paste slip carried over from the old code).
+ return self.failed_decls.size +
+ self.failed_fns.size +
+ self.failed_files.size +
+ self.failed_exports.size +
@boolToInt(self.link_error_flags.no_entry_point_found);
}
@@ -459,26 +505,42 @@ pub const Module = struct {
var errors = std.ArrayList(AllErrors.Message).init(self.allocator);
defer errors.deinit();
- for (self.failed_files.items) |scope| {
- const source = scope.parse_failure.source;
- for (scope.parse_failure.errors) |parse_error| {
- AllErrors.add(&arena, &errors, scope.sub_file_path, source, parse_error);
+ {
+ var it = self.failed_files.iterator();
+ while (it.next()) |kv| {
+ const scope = kv.key;
+ const err_msg = kv.value;
+ const source = scope.parse_failure.source;
+ AllErrors.add(&arena, &errors, scope.sub_file_path, source, err_msg);
}
}
-
- for (self.failed_fns.items) |func| {
- const source = func.scope.success.source;
- for (func.analysis.failure) |err_msg| {
+ {
+ var it = self.failed_fns.iterator();
+ while (it.next()) |kv| {
+ const func = kv.key;
+ const err_msg = kv.value;
+ const source = func.scope.success.source;
AllErrors.add(&arena, &errors, func.scope.sub_file_path, source, err_msg);
}
}
-
- for (self.failed_decls.items) |decl| {
- const source = decl.scope.success.source;
- for (decl.analysis.failure) |err_msg| {
+ {
+ var it = self.failed_decls.iterator();
+ while (it.next()) |kv| {
+ const decl = kv.key;
+ const err_msg = kv.value;
+ const source = decl.scope.success.source;
AllErrors.add(&arena, &errors, decl.scope.sub_file_path, source, err_msg);
}
}
+ {
+ var it = self.failed_exports.iterator();
+ while (it.next()) |kv| {
+ const decl = kv.key.owner_decl;
+ const err_msg = kv.value;
+ const source = decl.scope.success.source;
+ try AllErrors.add(&arena, &errors, decl.scope.sub_file_path, source, err_msg);
+ }
+ }
if (self.link_error_flags.no_entry_point_found) {
try errors.append(.{
@@ -508,23 +570,81 @@ pub const Module = struct {
// Here we simulate adding a source file which was previously not part of the compilation,
// which means scanning the decls looking for exports.
// TODO also identify decls that need to be deleted.
- const contents = blk: {
- // Clear parse errors.
- swapRemoveElem(self.allocator, *Scope.ZIRModule, root_scope, self.failed_files);
- try self.failed_files.ensureCapacity(self.allocator, self.failed_files.items.len + 1);
- break :blk root_scope.loadContents(self.allocator) catch |err| switch (err) {
- error.ParseFailure => {
- self.failed_files.appendAssumeCapacity(root_scope);
+ const src_module = switch (root_scope.status) {
+ .unloaded => blk: {
+ try self.failed_files.ensureCapacity(self.failed_files.size + 1);
+
+ var keep_source = false;
+ const source = try self.root_pkg_dir.readFileAllocOptions(
+ self.allocator,
+ self.root_src_path,
+ std.math.maxInt(u32),
+ 1,
+ 0,
+ );
+ defer if (!keep_source) self.allocator.free(source);
+
+ var keep_zir_module = false;
+ const zir_module = try self.allocator.create(text.Module);
+ defer if (!keep_zir_module) self.allocator.destroy(zir_module);
+
+ zir_module.* = try text.parse(self.allocator, source);
+ defer if (!keep_zir_module) zir_module.deinit(self.allocator);
+
+ if (zir_module.error_msg) |src_err_msg| {
+ self.failed_files.putAssumeCapacityNoClobber(
+ root_scope,
+ try ErrorMsg.create(self.allocator, src_err_msg.byte_offset, "{}", .{src_err_msg.msg}),
+ );
+ root_scope.status = .loaded_parse_failure;
+ root_scope.source = .{ .bytes = source };
+ keep_source = true;
return error.AnalysisFail;
- },
- else => |e| return e,
- };
+ }
+
+ root_scope.status = .loaded_success;
+ root_scope.source = .{ .bytes = source };
+ keep_source = true;
+ root_scope.contents = .{ .module = zir_module };
+ keep_zir_module = true;
+
+ break :blk zir_module;
+ },
+
+ .unloaded_parse_failure, .loaded_parse_failure => return error.AnalysisFail,
+ .loaded_success => root_scope.contents.module,
};
+
+ // Here we ensure enough queue capacity to store all the decls, so that later we can use
+ // appendAssumeCapacity.
+ try self.analysis_queue.ensureCapacity(self.analysis_queue.items.len + contents.module.decls.len);
+
for (contents.module.decls) |decl| {
if (decl.cast(text.Inst.Export)) |export_inst| {
try analyzeExport(self, &root_scope.base, export_inst);
}
}
+
+ while (self.analysis_queue.popOrNull()) |work_item| {
+ switch (work_item) {
+ .decl => |decl| switch (decl.analysis) {
+ .success => |typed_value| {
+ var arena = decl.arena.promote(self.allocator);
+ const update_result = self.bin_file.updateDecl(
+ self.*,
+ typed_value,
+ decl.export_node,
+ decl.fullyQualifiedNameHash(),
+ &arena.allocator,
+ );
+ decl.arena = arena.state;
+ if (try update_result) |err_msg| {
+ decl.analysis = .{ .codegen_failure = err_msg };
+ }
+ },
+ },
+ }
+ }
}
fn resolveDecl(self: *Module, scope: *Scope, old_inst: *text.Inst) InnerError!*Decl {
@@ -548,21 +668,41 @@ pub const Module = struct {
break :blk new_decl;
};
- var decl_scope: Scope.DeclAnalysis = .{ .decl = new_decl };
+ swapRemoveElem(self.allocator, *Scope.ZIRModule, root_scope, self.failed_decls);
+ var decl_scope: Scope.DeclAnalysis = .{
+ .base = .{ .parent = scope },
+ .decl = new_decl,
+ };
const typed_value = self.analyzeInstConst(&decl_scope.base, old_inst) catch |err| switch (err) {
- error.AnalysisFail => return error.AnalysisFail,
+ error.AnalysisFail => {
+ assert(new_decl.analysis == .failure);
+ return error.AnalysisFail;
+ },
else => |e| return e,
};
new_decl.analysis = .{ .success = typed_value };
- if (try self.bin_file.updateDecl(self.*, typed_value, new_decl.export_node, hash)) |err_msg| {
- new_decl.analysis = .{ .success = typed_value };
- } else |err| {
- return err;
- }
+ // We ensureCapacity when scanning for decls.
+ self.analysis_queue.appendAssumeCapacity(.{ .decl = new_decl });
return new_decl;
}
}
+ /// Like `resolveDecl`, but additionally requires the Decl's semantic analysis
+ /// to have completed successfully; every failure state maps to AnalysisFail.
+ fn resolveCompleteDecl(self: *Module, scope: *Scope, old_inst: *text.Inst) InnerError!*Decl {
+ const decl = try self.resolveDecl(scope, old_inst);
+ switch (decl.analysis) {
+ // In-progress states here would mean a caller asked for a Decl that is
+ // still being analyzed — presumably a dependency cycle; TODO confirm.
+ .initial_in_progress => unreachable,
+ .repeat_in_progress => unreachable,
+ .initial_dependency_failure,
+ .repeat_dependency_failure,
+ .initial_sema_failure,
+ .repeat_sema_failure,
+ .codegen_failure,
+ => return error.AnalysisFail,
+
+ .complete => return decl,
+ }
+ }
+
fn resolveInst(self: *Module, scope: *Scope, old_inst: *text.Inst) InnerError!*Inst {
if (scope.cast(Scope.Block)) |block| {
if (block.func.inst_table.get(old_inst)) |kv| {
@@ -570,7 +710,7 @@ pub const Module = struct {
}
}
- const decl = try self.resolveDecl(scope, old_inst);
+ const decl = try self.resolveCompleteDecl(scope, old_inst);
const decl_ref = try self.analyzeDeclRef(scope, old_inst.src, decl);
return self.analyzeDeref(scope, old_inst.src, decl_ref);
}
@@ -621,29 +761,52 @@ pub const Module = struct {
}
+ /// Analyzes an `export` instruction: resolves the exported Decl, validates its
+ /// type, records the Export in both tables, and notifies the linker.
fn analyzeExport(self: *Module, scope: *Scope, export_inst: *text.Inst.Export) !void {
+ // Reserve map capacity up front so the getOrPut calls below cannot fail.
+ try self.decl_exports.ensureCapacity(self.decl_exports.size + 1);
+ try self.export_owners.ensureCapacity(self.export_owners.size + 1);
const symbol_name = try self.resolveConstString(scope, export_inst.positionals.symbol_name);
- const decl = try self.resolveDecl(scope, export_inst.positionals.value);
+ const exported_decl = try self.resolveCompleteDecl(scope, export_inst.positionals.value);
+ const typed_value = exported_decl.typed_value.most_recent.typed_value;
+ switch (typed_value.ty.zigTypeTag()) {
+ .Fn => {},
+ else => return self.fail(
+ scope,
+ export_inst.positionals.value.src,
+ "unable to export type '{}'",
+ .{typed_value.ty},
+ ),
+ }
+ const new_export = try self.allocator.create(Export);
+ errdefer self.allocator.destroy(new_export);
- switch (decl.analysis) {
- .in_progress => unreachable,
- .failure => return error.AnalysisFail,
- .success => |typed_value| switch (typed_value.ty.zigTypeTag()) {
- .Fn => {},
- else => return self.fail(
- scope,
- export_inst.positionals.value.src,
- "unable to export type '{}'",
- .{typed_value.ty},
- ),
- },
+ const owner_decl = scope.getDecl();
+
+ new_export.* = .{
+ .options = .{ .data = .{ .name = symbol_name } },
+ .src = export_inst.base.src,
+ .link = .{},
+ .owner_decl = owner_decl,
+ .status = .in_progress,
+ };
+
+ // Add to export_owners table.
+ const eo_gop = self.export_owners.getOrPut(owner_decl) catch unreachable;
+ if (!eo_gop.found_existing) {
+ eo_gop.kv.value = &[0]*Export{};
+ }
+ eo_gop.kv.value = try self.allocator.realloc(eo_gop.kv.value, eo_gop.kv.value.len + 1);
+ eo_gop.kv.value[eo_gop.kv.value.len - 1] = new_export;
+ errdefer eo_gop.kv.value = self.allocator.shrink(eo_gop.kv.value, eo_gop.kv.value.len - 1);
+
+ // Add to exported_decl table.
+ const de_gop = self.decl_exports.getOrPut(exported_decl) catch unreachable;
+ if (!de_gop.found_existing) {
+ de_gop.kv.value = &[0]*Export{};
}
- const Node = LinkedList(std.builtin.ExportOptions).Node;
- export_node = try decl.arena.promote(self.allocator).allocator.create(Node);
- export_node.* = .{ .data = .{ .name = symbol_name } };
- decl.export_node = export_node;
+ de_gop.kv.value = try self.allocator.realloc(de_gop.kv.value, de_gop.kv.value.len + 1);
+ de_gop.kv.value[de_gop.kv.value.len - 1] = new_export;
+ errdefer de_gop.kv.value = self.allocator.shrink(de_gop.kv.value, de_gop.kv.value.len - 1);
- // TODO Avoid double update in the case of exporting a decl that we just created.
- self.bin_file.updateDeclExports();
+ // `decl` no longer exists after the rename; the exported Decl is meant here.
+ try self.bin_file.updateDeclExports(self, exported_decl, de_gop.kv.value);
}
/// TODO should not need the cast on the last parameter at the callsites
@@ -1636,6 +1799,31 @@ pub const Module = struct {
pub const ErrorMsg = struct {
byte_offset: usize,
msg: []const u8,
+
+ /// Heap-allocates an ErrorMsg with a formatted message.
+ /// Caller owns the result; release with `destroy`.
+ pub fn create(allocator: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !*ErrorMsg {
+ const self = try allocator.create(ErrorMsg);
+ // Destroy the allocation (`self`, not the type name) if init fails below.
+ errdefer allocator.destroy(self);
+ // `init` returns an error union (allocPrint can fail); propagate with `try`.
+ self.* = try init(allocator, byte_offset, format, args);
+ return self;
+ }
+
+ /// Assumes the ErrorMsg struct and msg were both allocated with allocator.
+ pub fn destroy(self: *ErrorMsg, allocator: *Allocator) void {
+ self.deinit(allocator);
+ allocator.destroy(self);
+ }
+
+ /// Initializes a value (not heap-allocated) with a formatted message owned
+ /// by `allocator`.
+ pub fn init(allocator: *Allocator, byte_offset: usize, comptime format: []const u8, args: var) !ErrorMsg {
+ return ErrorMsg{
+ .byte_offset = byte_offset,
+ .msg = try std.fmt.allocPrint(allocator, format, args),
+ };
+ }
+
+ pub fn deinit(self: *ErrorMsg, allocator: *Allocator) void {
+ // Free this message's own buffer (`err_msg` was an undefined identifier).
+ allocator.free(self.msg);
+ self.* = undefined;
+ }
};
pub fn main() anyerror!void {
src-self-hosted/link.zig
@@ -130,6 +130,20 @@ pub const ElfFile = struct {
no_entry_point_found: bool = false,
};
+ /// TODO it's too bad this optional takes up double the memory it should
+ pub const Decl = struct {
+ /// Each decl always gets a local symbol with the fully qualified name.
+ /// The vaddr and size are found here directly.
+ /// The file offset is found by computing the vaddr offset from the section vaddr
+ /// the symbol references, and adding that to the file offset of the section.
+ local_sym_index: ?usize = null,
+ };
+
+ /// TODO it's too bad this optional takes up double the memory it should
+ pub const Export = struct {
+ sym_index: ?usize = null,
+ };
+
pub fn deinit(self: *ElfFile) void {
self.sections.deinit(self.allocator);
self.program_headers.deinit(self.allocator);
@@ -138,7 +152,7 @@ pub const ElfFile = struct {
self.offset_table.deinit(self.allocator);
}
- // `expand_num / expand_den` is the factor of padding when allocation
+ // `alloc_num / alloc_den` is the factor of padding when allocation
const alloc_num = 4;
const alloc_den = 3;
@@ -216,12 +230,21 @@ pub const ElfFile = struct {
}
+ /// Appends `bytes` plus a NUL terminator to the section header string table
+ /// and returns the starting offset of the new string.
fn makeString(self: *ElfFile, bytes: []const u8) !u32 {
+ // Reserve for the bytes plus the terminator up front so both appends below
+ // are infallible.
try self.shstrtab.ensureCapacity(self.allocator, self.shstrtab.items.len + bytes.len + 1);
const result = self.shstrtab.items.len;
- try self.shstrtab.appendSlice(bytes);
- try self.shstrtab.append(0);
+ self.shstrtab.appendSliceAssumeCapacity(bytes);
+ self.shstrtab.appendAssumeCapacity(0);
return @intCast(u32, result);
}
+ /// Returns a string-table offset for `new_name`, reusing `old_str_off` when
+ /// the name is unchanged. Note the superseded string is not reclaimed when a
+ /// new one is appended — the shstrtab only grows.
+ fn updateString(self: *ElfFile, old_str_off: u32, new_name: []const u8) !u32 {
+ const existing_name = self.getString(old_str_off);
+ if (mem.eql(u8, existing_name, new_name)) {
+ return old_str_off;
+ }
+ return self.makeString(new_name);
+ }
+
pub fn populateMissingMetadata(self: *ElfFile) !void {
const small_ptr = switch (self.ptr_width) {
.p32 => true,
@@ -575,166 +598,200 @@ pub const ElfFile = struct {
try self.file.pwriteAll(hdr_buf[0..index], 0);
}
- /// TODO Look into making this smaller to save memory.
- /// Lots of redundant info here with the data stored in symbol structs.
- const DeclSymbol = struct {
- symbol_indexes: []usize,
- vaddr: u64,
- file_offset: u64,
- size: u64,
- };
-
const AllocatedBlock = struct {
vaddr: u64,
file_offset: u64,
size_capacity: u64,
};
- fn allocateDeclSymbol(self: *ElfFile, size: u64) AllocatedBlock {
+ /// Allocates `new_block_size` bytes of vaddr/file space at the end of the
+ /// text section, moving the whole section if its on-disk capacity is exceeded.
+ fn allocateTextBlock(self: *ElfFile, new_block_size: u64) !AllocatedBlock {
const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- todo();
- //{
- // // Now that we know the code size, we need to update the program header for executable code
- // phdr.p_memsz = vaddr - phdr.p_vaddr;
- // phdr.p_filesz = phdr.p_memsz;
-
- // const shdr = &self.sections.items[self.text_section_index.?];
- // shdr.sh_size = phdr.p_filesz;
+ const shdr = &self.sections.items[self.text_section_index.?];
+
+ const text_capacity = self.allocatedSize(shdr.sh_offset);
+ // TODO instead of looping here, maintain a free list and a pointer to the end.
+ const end_vaddr = blk: {
+ var start: u64 = 0;
+ var size: u64 = 0;
+ for (self.symbols.items) |sym| {
+ if (sym.st_value > start) {
+ // `sm` was an undefined identifier; the loop capture is `sym`.
+ start = sym.st_value;
+ size = sym.st_size;
+ }
+ }
+ break :blk start + (size * alloc_num / alloc_den);
+ };
- // self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
- // self.shdr_table_dirty = true; // TODO look into making only the one section dirty
- //}
+ const text_size = end_vaddr - phdr.p_vaddr;
+ const needed_size = text_size + new_block_size;
+ if (needed_size > text_capacity) {
+ // Must move the entire text section.
+ const new_offset = self.findFreeSpace(needed_size, 0x1000);
+ const amt = try self.file.copyRangeAll(shdr.sh_offset, self.file, new_offset, text_size);
+ if (amt != text_size) return error.InputOutput;
+ shdr.sh_offset = new_offset;
+ }
+ // Now that we know the code size, we need to update the program header for executable code
+ shdr.sh_size = needed_size;
+ phdr.p_memsz = needed_size;
+ phdr.p_filesz = needed_size;
- //return self.writeSymbols();
+ self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
+ self.shdr_table_dirty = true; // TODO look into making only the one section dirty
+
+ // Declared `!AllocatedBlock`, so every path must produce a block: the new
+ // block begins where the existing symbols end.
+ // NOTE(review): size_capacity is reported as the requested size; confirm
+ // whether remaining section capacity should be reported instead.
+ return AllocatedBlock{
+ .vaddr = end_vaddr,
+ .file_offset = shdr.sh_offset + (end_vaddr - phdr.p_vaddr),
+ .size_capacity = new_block_size,
+ };
}
- fn findAllocatedBlock(self: *ElfFile, vaddr: u64) AllocatedBlock {
- todo();
+ /// Computes the block of address space owned by `sym` within the text
+ /// section: capacity extends to the start of the next-higher symbol, or to
+ /// the end of the section's allocated file space if none follows.
+ /// NOTE(review): the later call site passes `&self.symbols.items[i]` (a
+ /// pointer) while this takes the symbol by value — confirm one of the two.
+ fn findAllocatedTextBlock(self: *ElfFile, sym: elf.Elf64_Sym) AllocatedBlock {
+ const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ const shdr = &self.sections.items[self.text_section_index.?];
+
+ // Find the next sym after this one.
+ // TODO look into using a hash map to speed up perf.
+ const text_capacity = self.allocatedSize(shdr.sh_offset);
+ var next_vaddr_start = phdr.p_vaddr + text_capacity;
+ for (self.symbols.items) |elem| {
+ if (elem.st_value < sym.st_value) continue;
+ if (elem.st_value < next_vaddr_start) next_vaddr_start = elem.st_value;
+ }
+ return .{
+ .vaddr = sym.st_value,
+ // vaddr offset from the section base maps directly to a file offset.
+ .file_offset = shdr.sh_offset + (sym.st_value - phdr.p_vaddr),
+ .size_capacity = next_vaddr_start - sym.st_value,
+ };
+ }
- pub fn updateDecl(
- self: *ElfFile,
- module: ir.Module,
- typed_value: ir.TypedValue,
- decl_export_node: ?*std.LinkedList(std.builtin.ExportOptions).Node,
- hash: ir.Module.Decl.Hash,
- err_msg_allocator: *Allocator,
- ) !?ir.ErrorMsg {
+ pub fn updateDecl(self: *ElfFile, module: *ir.Module, decl: *ir.Module.Decl) !void {
var code = std.ArrayList(u8).init(self.allocator);
defer code.deinit();
- const err_msg = try codegen.generateSymbol(typed_value, module, &code, err_msg_allocator);
- if (err_msg != null) |em| return em;
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const err_msg = try codegen.generateSymbol(typed_value, module, &code, module.allocator);
+ if (err_msg) |em| {
+ decl.analysis = .codegen_failure;
+ _ = try module.failed_decls.put(decl, em);
+ return;
+ }
- const export_count = blk: {
- var export_node = decl_export_node;
- var i: usize = 0;
- while (export_node) |node| : (export_node = node.next) i += 1;
- break :blk i;
- };
+ const file_offset = blk: {
+ const code_size = code.items.len;
+ const stt_bits: u8 = switch (typed_value.ty.zigTypeTag()) {
+ .Fn => elf.STT_FUNC,
+ else => elf.STT_OBJECT,
+ };
- // Find or create a symbol from the decl
- var valid_sym_index_len: usize = 0;
- const decl_symbol = blk: {
- if (self.decl_table.getValue(hash)) |decl_symbol| {
- valid_sym_index_len = decl_symbol.symbol_indexes.len;
- decl_symbol.symbol_indexes = try self.allocator.realloc(usize, export_count);
-
- const existing_block = self.findAllocatedBlock(decl_symbol.vaddr);
- if (code.items.len > existing_block.size_capacity) {
- const new_block = self.allocateDeclSymbol(code.items.len);
- decl_symbol.vaddr = new_block.vaddr;
- decl_symbol.file_offset = new_block.file_offset;
- decl_symbol.size = code.items.len;
- }
- break :blk decl_symbol;
+ if (decl.link.local_sym_index) |local_sym_index| {
+ const local_sym = &self.symbols.items[local_sym_index];
+ const existing_block = self.findAllocatedTextBlock(local_sym.*);
+ const file_offset = if (code_size > existing_block.size_capacity) fo: {
+ const new_block = self.allocateTextBlock(code_size);
+ local_sym.st_value = new_block.vaddr;
+ local_sym.st_size = code_size;
+ break :fo new_block.file_offset;
+ } else existing_block.file_offset;
+ local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(u8, decl.name));
+ local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
+ // TODO this write could be avoided if no fields of the symbol were changed.
+ try self.writeSymbol(local_sym_index);
+ break :blk file_offset;
} else {
- const new_block = self.allocateDeclSymbol(code.items.len);
-
- const decl_symbol = try self.allocator.create(DeclSymbol);
- errdefer self.allocator.destroy(decl_symbol);
-
- decl_symbol.* = .{
- .symbol_indexes = try self.allocator.alloc(usize, export_count),
- .vaddr = new_block.vaddr,
- .file_offset = new_block.file_offset,
- .size = code.items.len,
- };
- errdefer self.allocator.free(decl_symbol.symbol_indexes);
-
- try self.decl_table.put(hash, decl_symbol);
- break :blk decl_symbol;
+ try self.symbols.ensureCapacity(self.symbols.items.len + 1);
+ const decl_name = mem.spanZ(u8, decl.name);
+ const name_str_index = try self.makeString(decl_name);
+ const new_block = self.allocateTextBlock(code_size);
+ const local_sym_index = self.symbols.items.len;
+
+ self.symbols.appendAssumeCapacity(.{
+ .st_name = name_str_index,
+ .st_info = (elf.STB_LOCAL << 4) | stt_bits,
+ .st_other = 0,
+ .st_shndx = self.text_section_index.?,
+ .st_value = new_block.vaddr,
+ .st_size = code_size,
+ });
+ errdefer self.symbols.shrink(self.symbols.items.len - 1);
+ try self.writeSymbol(local_sym_index);
+
+ self.symbol_count_dirty = true;
+ decl.link.local_sym_index = local_sym_index;
+
+ break :blk new_block.file_offset;
}
};
- // Allocate new symbols.
- {
- var i: usize = valid_sym_index_len;
- const old_len = self.symbols.items.len;
- try self.symbols.resize(old_len + (decl_symbol.symbol_indexes.len - i));
- while (i < decl_symbol.symbol_indexes) : (i += 1) {
- decl_symbol.symbol_indexes[i] = old_len + i;
- }
- }
+ try self.file.pwriteAll(code.items, file_offset);
- var export_node = decl_export_node;
- var export_index: usize = 0;
- while (export_node) |node| : ({
- export_node = node.next;
- export_index += 1;
- }) {
- if (node.data.section) |section_name| {
+ // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*ir.Module.Export{};
+ return self.updateDeclExports(module, decl, decl_exports);
+ }
+
+ /// Must be called only after a successful call to `updateDecl`.
+ pub fn updateDeclExports(
+ self: *ElfFile,
+ module: *ir.Module,
+ decl: *const ir.Module.Decl,
+ exports: []const *const Export,
+ ) !void {
+ try self.symbols.ensureCapacity(self.symbols.items.len + exports.len);
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const decl_sym = self.symbols.items[decl.link.local_sym_index.?];
+
+ for (exports) |exp| {
+ if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
- try errors.ensureCapacity(errors.items.len + 1);
- errors.appendAssumeCapacity(.{
- .byte_offset = 0,
- .msg = try std.fmt.allocPrint(errors.allocator, "Unimplemented: ExportOptions.section", .{}),
- });
+ try module.failed_exports.ensureCapacity(module.failed_exports.size + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try ir.ErrorMsg.create(0, "Unimplemented: ExportOptions.section", .{}),
+ );
+ continue;
}
}
- const stb_bits = switch (node.data.linkage) {
+ const stb_bits = switch (exp.options.linkage) {
.Internal => elf.STB_LOCAL,
.Strong => blk: {
- if (mem.eql(u8, node.data.name, "_start")) {
+ if (mem.eql(u8, exp.options.name, "_start")) {
- self.entry_addr = decl_symbol.vaddr;
+ self.entry_addr = decl_sym.st_value;
}
break :blk elf.STB_GLOBAL;
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
- try errors.ensureCapacity(errors.items.len + 1);
- errors.appendAssumeCapacity(.{
- .byte_offset = 0,
- .msg = try std.fmt.allocPrint(errors.allocator, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
- });
+ try module.failed_exports.ensureCapacity(module.failed_exports.size + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try ir.ErrorMsg.create(0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
+ );
+ continue;
},
};
- const stt_bits = switch (typed_value.ty.zigTypeTag()) {
- .Fn => elf.STT_FUNC,
- else => elf.STT_OBJECT,
- };
- const sym_index = decl_symbol.symbol_indexes[export_index];
- const name = blk: {
- if (i < valid_sym_index_len) {
- const name_stroff = self.symbols.items[sym_index].st_name;
- const existing_name = self.getString(name_stroff);
- if (mem.eql(u8, existing_name, node.data.name)) {
- break :blk name_stroff;
- }
- }
- break :blk try self.makeString(node.data.name);
- };
- self.symbols.items[sym_index] = .{
- .st_name = name,
- .st_info = (stb_bits << 4) | stt_bits,
- .st_other = 0,
- .st_shndx = self.text_section_index.?,
- .st_value = decl_symbol.vaddr,
- .st_size = code.items.len,
- };
+ const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
+ if (exp.link.sym_index) |i| {
+ const sym = &self.symbols.items[i];
+ sym.* = .{
+ .st_name = try self.updateString(sym.st_name, exp.options.name),
+ .st_info = (stb_bits << 4) | stt_bits,
+ .st_other = 0,
+ .st_shndx = self.text_section_index.?,
+ .st_value = decl_sym.st_value,
+ .st_size = decl_sym.st_size,
+ };
+ try self.writeSymbol(i);
+ } else {
+ const name = try self.makeString(exp.options.name);
+ const i = self.symbols.items.len;
+ self.symbols.appendAssumeCapacity(.{
+ .st_name = name,
+ .st_info = (stb_bits << 4) | stt_bits,
+ .st_other = 0,
+ .st_shndx = self.text_section_index.?,
+ .st_value = decl_sym.st_value,
+ .st_size = decl_sym.st_size,
+ });
+ errdefer self.symbols.shrink(self.symbols.items.len - 1);
+ try self.writeSymbol(i);
+
+ self.symbol_count_dirty = true;
+ exp.link.sym_index = i;
+ }
}
-
- try self.file.pwriteAll(code.items, decl_symbol.file_offset);
}
fn writeProgHeader(self: *ElfFile, index: usize) !void {
@@ -782,7 +839,48 @@ pub const ElfFile = struct {
}
}
- fn writeSymbols(self: *ElfFile) !void {
+ fn writeSymbol(self: *ElfFile, index: usize) !void {
+ const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ // Make sure we are not pointlessly writing symbol data that will have to get relocated
+ // due to running out of space.
+ if (self.symbol_count_dirty) {
+ const allocated_size = self.allocatedSize(syms_sect.sh_offset);
+ const needed_size = self.symbols.items.len * sym_size;
+ if (needed_size > allocated_size) {
+ return self.writeAllSymbols();
+ }
+ }
+ const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
+ switch (self.ptr_width) {
+ .p32 => {
+ var sym = [1]elf.Elf32_Sym{
+ .{
+ .st_name = self.symbols.items[index].st_name,
+ .st_value = @intCast(u32, self.symbols.items[index].st_value),
+ .st_size = @intCast(u32, self.symbols.items[index].st_size),
+ .st_info = self.symbols.items[index].st_info,
+ .st_other = self.symbols.items[index].st_other,
+ .st_shndx = self.symbols.items[index].st_shndx,
+ },
+ };
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf32_Sym, &sym[0]);
+ }
+ const off = syms_sect.sh_offset + @sizeOf(elf.Elf32_Sym) * index;
+ try self.file.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+ },
+ .p64 => {
+ var sym = [1]elf.Elf64_Sym{self.symbols.items[index]};
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf64_Sym, &sym[0]);
+ }
+ const off = syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index;
+ try self.file.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+ },
+ }
+ }
+
+ fn writeAllSymbols(self: *ElfFile) !void {
const small_ptr = self.ptr_width == .p32;
const syms_sect = &self.sections.items[self.symtab_section_index.?];
const sym_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
src-self-hosted/type.zig
@@ -5,8 +5,7 @@ const Allocator = std.mem.Allocator;
const Target = std.Target;
/// This is the raw data, with no bookkeeping, no memory awareness, no de-duplication.
-/// It's important for this struct to be small.
-/// It is not copyable since it may contain references to its inner data.
+/// It's important for this type to be small.
/// Types are not de-duplicated, which helps with multi-threading since it obviates the requirement
/// of obtaining a lock on a global type table, as well as making the
/// garbage collection bookkeeping simpler.
@@ -51,6 +50,7 @@ pub const Type = extern union {
.comptime_int => return .ComptimeInt,
.comptime_float => return .ComptimeFloat,
.noreturn => return .NoReturn,
+ .@"null" => return .Null,
.fn_noreturn_no_args => return .Fn,
.fn_naked_noreturn_no_args => return .Fn,
@@ -184,6 +184,8 @@ pub const Type = extern union {
.noreturn,
=> return out_stream.writeAll(@tagName(t)),
+ .@"null" => return out_stream.writeAll("@TypeOf(null)"),
+
.const_slice_u8 => return out_stream.writeAll("[]const u8"),
.fn_noreturn_no_args => return out_stream.writeAll("fn() noreturn"),
.fn_naked_noreturn_no_args => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
@@ -246,6 +248,7 @@ pub const Type = extern union {
.comptime_int => return Value.initTag(.comptime_int_type),
.comptime_float => return Value.initTag(.comptime_float_type),
.noreturn => return Value.initTag(.noreturn_type),
+ .@"null" => return Value.initTag(.null_type),
.fn_noreturn_no_args => return Value.initTag(.fn_noreturn_no_args_type),
.fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type),
.fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type),
@@ -286,6 +289,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.array_u8_sentinel_0,
.const_slice_u8,
@@ -329,6 +333,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.array_u8_sentinel_0,
.single_const_pointer,
@@ -372,6 +377,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.array_u8_sentinel_0,
.fn_noreturn_no_args,
@@ -416,6 +422,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.fn_noreturn_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
@@ -458,6 +465,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.fn_noreturn_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
@@ -489,6 +497,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.fn_noreturn_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
@@ -533,6 +542,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.fn_noreturn_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
@@ -606,6 +616,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.single_const_pointer,
.single_const_pointer_to_comptime_int,
@@ -650,6 +661,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.single_const_pointer,
.single_const_pointer_to_comptime_int,
@@ -693,6 +705,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.single_const_pointer,
.single_const_pointer_to_comptime_int,
@@ -736,6 +749,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.single_const_pointer,
.single_const_pointer_to_comptime_int,
@@ -779,6 +793,7 @@ pub const Type = extern union {
.comptime_int,
.comptime_float,
.noreturn,
+ .@"null",
.array,
.single_const_pointer,
.single_const_pointer_to_comptime_int,
@@ -833,6 +848,7 @@ pub const Type = extern union {
.type,
.anyerror,
.noreturn,
+ .@"null",
.fn_noreturn_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
@@ -881,6 +897,7 @@ pub const Type = extern union {
.c_void,
.void,
.noreturn,
+ .@"null",
=> return true,
.int_unsigned => return ty.cast(Payload.IntUnsigned).?.bits == 0,
@@ -933,6 +950,7 @@ pub const Type = extern union {
.c_void,
.void,
.noreturn,
+ .@"null",
.int_unsigned,
.int_signed,
.array,
@@ -974,6 +992,7 @@ pub const Type = extern union {
comptime_int,
comptime_float,
noreturn,
+ @"null",
fn_noreturn_no_args,
fn_naked_noreturn_no_args,
fn_ccc_void_no_args,
src-self-hosted/TypedValue.zig
@@ -0,0 +1,23 @@
+const std = @import("std");
+const Type = @import("type.zig").Type;
+const Value = @import("value.zig").Value;
+const Allocator = std.mem.Allocator;
+const TypedValue = @This();
+
+ty: Type,
+val: Value,
+
+/// Memory management for TypedValue. The main purpose of this type
+/// is to be small and have a deinit() function to free associated resources.
+pub const Managed = struct {
+ /// If the tag value is less than Tag.no_payload_count, then no pointer
+ /// dereference is needed.
+ typed_value: TypedValue,
+ /// If this is `null` then there is no memory management needed.
+ arena: ?*std.heap.ArenaAllocator.State = null,
+
+ pub fn deinit(self: *Managed, allocator: *Allocator) void {
+ if (self.arena) |a| a.promote(allocator).deinit();
+ self.* = undefined;
+ }
+};
src-self-hosted/value.zig
@@ -10,7 +10,7 @@ const ir = @import("ir.zig");
/// This is the raw data, with no bookkeeping, no memory awareness,
/// no de-duplication, and no type system awareness.
-/// It's important for this struct to be small.
+/// It's important for this type to be small.
/// This union takes advantage of the fact that the first page of memory
/// is unmapped, giving us 4096 possible enum tags that have no payload.
pub const Value = extern union {
@@ -46,6 +46,7 @@ pub const Value = extern union {
comptime_int_type,
comptime_float_type,
noreturn_type,
+ null_type,
fn_noreturn_no_args_type,
fn_naked_noreturn_no_args_type,
fn_ccc_void_no_args_type,
@@ -138,6 +139,7 @@ pub const Value = extern union {
.comptime_int_type => return out_stream.writeAll("comptime_int"),
.comptime_float_type => return out_stream.writeAll("comptime_float"),
.noreturn_type => return out_stream.writeAll("noreturn"),
+ .null_type => return out_stream.writeAll("@TypeOf(null)"),
.fn_noreturn_no_args_type => return out_stream.writeAll("fn() noreturn"),
.fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"),
.fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"),
@@ -209,6 +211,7 @@ pub const Value = extern union {
.comptime_int_type => Type.initTag(.comptime_int),
.comptime_float_type => Type.initTag(.comptime_float),
.noreturn_type => Type.initTag(.noreturn),
+ .null_type => Type.initTag(.@"null"),
.fn_noreturn_no_args_type => Type.initTag(.fn_noreturn_no_args),
.fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args),
.fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args),
@@ -263,6 +266,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -319,6 +323,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -376,6 +381,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -438,6 +444,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -529,6 +536,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -582,6 +590,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -674,6 +683,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -736,6 +746,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
@@ -812,6 +823,7 @@ pub const Value = extern union {
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
+ .null_type,
.fn_noreturn_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,