Commit 2e31077fe0
Changed files (13)
lib/std/math/isnan.zig
@@ -28,6 +28,8 @@ test isNan {
}
test isSignalNan {
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff and builtin.abi != .gnu) return error.SkipZigTest;
+
inline for ([_]type{ f16, f32, f64, f80, f128, c_longdouble }) |T| {
// TODO: Signalling NaN values get converted to quiet NaN values in
// some cases where they shouldn't such that this can fail.
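For context: a signaling NaN differs from a quiet NaN only in the most significant significand bit, which IEEE 754 reserves as the quiet bit. A minimal sketch of that check for f32 (isSignalNanF32 is a hypothetical helper, not the std implementation, which handles every float width):

const std = @import("std");

fn isSignalNanF32(x: f32) bool {
    const bits: u32 = @bitCast(x);
    const quiet_bit: u32 = 1 << 22; // most significant bit of the 23-bit significand
    return std.math.isNan(x) and (bits & quiet_bit) == 0;
}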
lib/std/coff.zig
@@ -249,55 +249,6 @@ pub const OptionalHeader = extern struct {
pub const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
-pub const DirectoryEntry = enum(u16) {
- /// Export Directory
- EXPORT = 0,
-
- /// Import Directory
- IMPORT = 1,
-
- /// Resource Directory
- RESOURCE = 2,
-
- /// Exception Directory
- EXCEPTION = 3,
-
- /// Security Directory
- SECURITY = 4,
-
- /// Base Relocation Table
- BASERELOC = 5,
-
- /// Debug Directory
- DEBUG = 6,
-
- /// Architecture Specific Data
- ARCHITECTURE = 7,
-
- /// RVA of GP
- GLOBALPTR = 8,
-
- /// TLS Directory
- TLS = 9,
-
- /// Load Configuration Directory
- LOAD_CONFIG = 10,
-
- /// Bound Import Directory in headers
- BOUND_IMPORT = 11,
-
- /// Import Address Table
- IAT = 12,
-
- /// Delay Load Import Descriptors
- DELAY_IMPORT = 13,
-
- /// COM Runtime descriptor
- COM_DESCRIPTOR = 14,
-
- _,
-};
-
pub const ImageDataDirectory = extern struct {
virtual_address: u32,
size: u32,
@@ -1054,9 +1005,9 @@ pub const Coff = struct {
assert(self.is_image);
const data_dirs = self.getDataDirectories();
- if (@intFromEnum(DirectoryEntry.DEBUG) >= data_dirs.len) return null;
+ if (@intFromEnum(IMAGE.DIRECTORY_ENTRY.DEBUG) >= data_dirs.len) return null;
- const debug_dir = data_dirs[@intFromEnum(DirectoryEntry.DEBUG)];
+ const debug_dir = data_dirs[@intFromEnum(IMAGE.DIRECTORY_ENTRY.DEBUG)];
var reader: std.Io.Reader = .fixed(self.data);
if (self.is_loaded) {
@@ -1400,6 +1351,44 @@ pub const Relocation = extern struct {
};
pub const IMAGE = struct {
+ pub const DIRECTORY_ENTRY = enum(u32) {
+ /// Export Directory
+ EXPORT = 0,
+ /// Import Directory
+ IMPORT = 1,
+ /// Resource Directory
+ RESOURCE = 2,
+ /// Exception Directory
+ EXCEPTION = 3,
+ /// Security Directory
+ SECURITY = 4,
+ /// Base Relocation Table
+ BASERELOC = 5,
+ /// Debug Directory
+ DEBUG = 6,
+ /// Architecture Specific Data
+ ARCHITECTURE = 7,
+ /// RVA of GP
+ GLOBALPTR = 8,
+ /// TLS Directory
+ TLS = 9,
+ /// Load Configuration Directory
+ LOAD_CONFIG = 10,
+ /// Bound Import Directory in headers
+ BOUND_IMPORT = 11,
+ /// Import Address Table
+ IAT = 12,
+ /// Delay Load Import Descriptors
+ DELAY_IMPORT = 13,
+ /// COM Runtime descriptor
+ COM_DESCRIPTOR = 14,
+ /// must be zero
+ RESERVED = 15,
+ _,
+
+ pub const len = @typeInfo(IMAGE.DIRECTORY_ENTRY).@"enum".fields.len;
+ };
+
pub const FILE = struct {
/// Machine Types
/// The Machine field has one of the following values, which specify the CPU type.
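The DirectoryEntry enum moves into the IMAGE namespace as IMAGE.DIRECTORY_ENTRY, gaining the RESERVED field and a len constant so callers no longer hard-code 16. A hedged usage sketch mirroring the getDebugDirectory change above (findDebugDir is a hypothetical wrapper; it assumes Coff.getDataDirectories as used in that hunk):

const std = @import("std");

fn findDebugDir(coff_file: *const std.coff.Coff) ?std.coff.ImageDataDirectory {
    const data_dirs = coff_file.getDataDirectories();
    const debug_index = @intFromEnum(std.coff.IMAGE.DIRECTORY_ENTRY.DEBUG);
    if (debug_index >= data_dirs.len) return null;
    return data_dirs[debug_index];
}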
lib/std/debug.zig
@@ -468,10 +468,6 @@ const use_trap_panic = switch (builtin.zig_backend) {
.stage2_wasm,
.stage2_x86,
=> true,
- .stage2_x86_64 => switch (builtin.target.ofmt) {
- .elf, .macho => false,
- else => true,
- },
else => false,
};
@@ -484,22 +480,6 @@ pub fn defaultPanic(
if (use_trap_panic) @trap();
- switch (builtin.zig_backend) {
- .stage2_aarch64,
- .stage2_arm,
- .stage2_powerpc,
- .stage2_riscv64,
- .stage2_spirv,
- .stage2_wasm,
- .stage2_x86,
- => @trap(),
- .stage2_x86_64 => switch (builtin.target.ofmt) {
- .elf, .macho => {},
- else => @trap(),
- },
- else => {},
- }
-
switch (builtin.os.tag) {
.freestanding, .other => {
@trap();
lib/ubsan_rt.zig
@@ -120,12 +120,6 @@ const Value = extern struct {
}
pub fn format(value: Value, writer: *std.Io.Writer) std.Io.Writer.Error!void {
- // Work around x86_64 backend limitation.
- if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) {
- try writer.writeAll("(unknown)");
- return;
- }
-
switch (value.td.kind) {
.integer => {
if (value.td.isSigned()) {
@@ -624,10 +618,11 @@ fn exportHandler(
handler: anytype,
comptime sym_name: []const u8,
) void {
- // Work around x86_64 backend limitation.
- const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
- const N = "__ubsan_handle_" ++ sym_name;
- @export(handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
+ @export(handler, .{
+ .name = "__ubsan_handle_" ++ sym_name,
+ .linkage = .weak,
+ .visibility = .hidden,
+ });
}
fn exportHandlerWithAbort(
@@ -635,16 +630,16 @@ fn exportHandlerWithAbort(
abort_handler: anytype,
comptime sym_name: []const u8,
) void {
- // Work around x86_64 backend limitation.
- const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
- {
- const N = "__ubsan_handle_" ++ sym_name;
- @export(handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
- }
- {
- const N = "__ubsan_handle_" ++ sym_name ++ "_abort";
- @export(abort_handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
- }
+ @export(handler, .{
+ .name = "__ubsan_handle_" ++ sym_name,
+ .linkage = .weak,
+ .visibility = .hidden,
+ });
+ @export(abort_handler, .{
+ .name = "__ubsan_handle_" ++ sym_name ++ "_abort",
+ .linkage = .weak,
+ .visibility = .hidden,
+ });
}
const can_build_ubsan = switch (builtin.zig_backend) {
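Dropping the x86_64/Windows workaround lets every handler use the same weak, hidden export, so a stronger definition (for example from compiler-rt or user code) can still take precedence at link time. A minimal sketch of the pattern, with a hypothetical handler and symbol name:

fn handleExample() callconv(.c) noreturn {
    @trap();
}

comptime {
    @export(&handleExample, .{
        .name = "__ubsan_handle_example", // hypothetical symbol name
        .linkage = .weak,
        .visibility = .hidden,
    });
}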
src/codegen/x86_64/CodeGen.zig
@@ -173685,7 +173685,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_nav = air_datas[@intFromEnum(inst)].ty_nav;
const nav = ip.getNav(ty_nav.nav);
const is_threadlocal = zcu.comp.config.any_non_single_threaded and nav.isThreadlocal(ip);
- if (is_threadlocal) if (cg.mod.pic) {
+ if (is_threadlocal) if (cg.target.ofmt == .coff or cg.mod.pic) {
try cg.spillRegisters(&.{ .rdi, .rax });
} else {
try cg.spillRegisters(&.{.rax});
src/codegen/x86_64/Emit.zig
@@ -386,6 +386,82 @@ pub fn emitMir(emit: *Emit) Error!void {
}, emit.lower.target), &.{});
},
else => unreachable,
+ } else if (emit.bin_file.cast(.coff2)) |coff| {
+ switch (emit.lower.target.cpu.arch) {
+ else => unreachable,
+ .x86 => {
+ try emit.encodeInst(try .new(.none, .mov, &.{
+ .{ .reg = .eax },
+ .{ .mem = .initSib(.qword, .{
+ .base = .{ .reg = .fs },
+ .disp = 4 * 11,
+ }) },
+ }, emit.lower.target), &.{});
+ try emit.encodeInst(try .new(.none, .mov, &.{
+ .{ .reg = .edi },
+ .{ .mem = .initSib(.dword, .{}) },
+ }, emit.lower.target), &.{.{
+ .op_index = 1,
+ .target = .{
+ .index = @intFromEnum(
+ try coff.globalSymbol("__tls_index", null),
+ ),
+ .is_extern = false,
+ .type = .symbol,
+ },
+ }});
+ try emit.encodeInst(try .new(.none, .mov, &.{
+ .{ .reg = .eax },
+ .{ .mem = .initSib(.dword, .{
+ .base = .{ .reg = .eax },
+ .scale_index = .{ .index = .edi, .scale = 4 },
+ }) },
+ }, emit.lower.target), &.{});
+ try emit.encodeInst(try .new(.none, lowered_inst.encoding.mnemonic, &.{
+ lowered_inst.ops[0],
+ .{ .mem = .initSib(lowered_inst.ops[1].mem.sib.ptr_size, .{
+ .base = .{ .reg = .eax },
+ .disp = std.math.minInt(i32),
+ }) },
+ }, emit.lower.target), reloc_info);
+ },
+ .x86_64 => {
+ try emit.encodeInst(try .new(.none, .mov, &.{
+ .{ .reg = .rax },
+ .{ .mem = .initSib(.qword, .{
+ .base = .{ .reg = .gs },
+ .disp = 8 * 11,
+ }) },
+ }, emit.lower.target), &.{});
+ try emit.encodeInst(try .new(.none, .mov, &.{
+ .{ .reg = .edi },
+ .{ .mem = .initRip(.dword, 0) },
+ }, emit.lower.target), &.{.{
+ .op_index = 1,
+ .target = .{
+ .index = @intFromEnum(
+ try coff.globalSymbol("_tls_index", null),
+ ),
+ .is_extern = false,
+ .type = .symbol,
+ },
+ }});
+ try emit.encodeInst(try .new(.none, .mov, &.{
+ .{ .reg = .rax },
+ .{ .mem = .initSib(.qword, .{
+ .base = .{ .reg = .rax },
+ .scale_index = .{ .index = .rdi, .scale = 8 },
+ }) },
+ }, emit.lower.target), &.{});
+ try emit.encodeInst(try .new(.none, lowered_inst.encoding.mnemonic, &.{
+ lowered_inst.ops[0],
+ .{ .mem = .initSib(lowered_inst.ops[1].mem.sib.ptr_size, .{
+ .base = .{ .reg = .rax },
+ .disp = std.math.minInt(i32),
+ }) },
+ }, emit.lower.target), reloc_info);
+ },
+ }
} else return emit.fail("TODO implement relocs for {s}", .{
@tagName(emit.bin_file.tag),
});
@@ -870,7 +946,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
.symbolnum = @intCast(reloc.target.index),
},
});
- } else return emit.fail("TODO implement {s} reloc for {s}", .{
+ } else if (emit.bin_file.cast(.coff2)) |coff| try coff.addReloc(
+ @enumFromInt(emit.atom_index),
+ end_offset - 4,
+ @enumFromInt(reloc.target.index),
+ reloc.off,
+ .{ .AMD64 = .SECREL },
+ ) else return emit.fail("TODO implement {s} reloc for {s}", .{
@tagName(reloc.target.type), @tagName(emit.bin_file.tag),
}),
};
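The new coff2 branch lowers thread-local access with the standard Windows TLS sequence: fetch the per-thread TLS array from the TEB (gs:[0x58] on x86_64, fs:[0x2C] on x86 — the 8 * 11 and 4 * 11 displacements above), index it by the image's _tls_index (__tls_index on x86), then address the variable by its offset within .tls$, which the SECREL relocation added below fills in. The sequence clobbers rdi and rax, which is why the CodeGen.zig hunk above now spills both registers for COFF even without PIC. A sketch of Zig source that takes this path, with the assumed lowering as comments:

threadlocal var counter: u32 = 0; // stored in the .tls$ template section

pub fn bump() u32 {
    // Assumed x86_64 lowering, matching the emitted sequence above:
    //   mov rax, gs:[0x58]          ; TEB.ThreadLocalStoragePointer (8 * 11)
    //   mov edi, [_tls_index]       ; this image's slot in the TLS array
    //   mov rax, [rax + rdi*8]      ; base of this image's TLS block
    //   <op> [rax + counter@SECREL] ; disp patched by the SECREL reloc
    counter += 1;
    return counter;
}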
src/link/Coff.zig
@@ -9,7 +9,9 @@ strings: std.HashMapUnmanaged(
std.hash_map.default_max_load_percentage,
),
string_bytes: std.ArrayList(u8),
-section_table: std.ArrayList(Symbol.Index),
+image_section_table: std.ArrayList(Symbol.Index),
+pseudo_section_table: std.AutoArrayHashMapUnmanaged(String, Symbol.Index),
+object_section_table: std.AutoArrayHashMapUnmanaged(String, Symbol.Index),
symbol_table: std.ArrayList(Symbol),
globals: std.AutoArrayHashMapUnmanaged(GlobalName, Symbol.Index),
global_pending_index: u32,
@@ -24,8 +26,6 @@ pending_uavs: std.AutoArrayHashMapUnmanaged(Node.UavMapIndex, struct {
src_loc: Zcu.LazySrcLoc,
}),
relocs: std.ArrayList(Reloc),
-/// This is hiding actual bugs with global symbols! Reconsider once they are implemented correctly.
-entry_hack: Symbol.Index,
pub const default_file_alignment: u16 = 0x200;
pub const default_size_of_stack_reserve: u32 = 0x1000000;
@@ -102,17 +102,45 @@ pub const Node = union(enum) {
optional_header,
data_directories,
section_table,
- section: Symbol.Index,
+ image_section: Symbol.Index,
+
import_directory_table,
import_lookup_table: ImportTable.Index,
import_address_table: ImportTable.Index,
import_hint_name_table: ImportTable.Index,
+
+ pseudo_section: PseudoSectionMapIndex,
+ object_section: ObjectSectionMapIndex,
global: GlobalMapIndex,
nav: NavMapIndex,
uav: UavMapIndex,
lazy_code: LazyMapRef.Index(.code),
lazy_const_data: LazyMapRef.Index(.const_data),
+ pub const PseudoSectionMapIndex = enum(u32) {
+ _,
+
+ pub fn name(psmi: PseudoSectionMapIndex, coff: *const Coff) String {
+ return coff.pseudo_section_table.keys()[@intFromEnum(psmi)];
+ }
+
+ pub fn symbol(psmi: PseudoSectionMapIndex, coff: *const Coff) Symbol.Index {
+ return coff.pseudo_section_table.values()[@intFromEnum(psmi)];
+ }
+ };
+
+ pub const ObjectSectionMapIndex = enum(u32) {
+ _,
+
+ pub fn name(osmi: ObjectSectionMapIndex, coff: *const Coff) String {
+ return coff.object_section_table.keys()[@intFromEnum(osmi)];
+ }
+
+ pub fn symbol(osmi: ObjectSectionMapIndex, coff: *const Coff) Symbol.Index {
+ return coff.object_section_table.values()[@intFromEnum(osmi)];
+ }
+ };
+
pub const GlobalMapIndex = enum(u32) {
_,
@@ -204,27 +232,6 @@ pub const Node = union(enum) {
}
};
-pub const DataDirectory = enum {
- export_table,
- import_table,
- resorce_table,
- exception_table,
- certificate_table,
- base_relocation_table,
- debug,
- architecture,
- global_ptr,
- tls_table,
- load_config_table,
- bound_import,
- import_address_table,
- delay_import_descriptor,
- clr_runtime_header,
- reserved,
-
- pub const len = @typeInfo(DataDirectory).@"enum".fields.len;
-};
-
pub const ImportTable = struct {
ni: MappedFile.Node.Index,
entries: std.AutoArrayHashMapUnmanaged(void, Entry),
@@ -264,9 +271,18 @@ pub const ImportTable = struct {
};
pub const String = enum(u32) {
+ @".data" = 0,
+ @".idata" = 6,
+ @".rdata" = 13,
+ @".text" = 20,
+ @".tls$" = 26,
_,
pub const Optional = enum(u32) {
+ @".data" = @intFromEnum(String.@".data"),
+ @".rdata" = @intFromEnum(String.@".rdata"),
+ @".text" = @intFromEnum(String.@".text"),
+ @".tls$" = @intFromEnum(String.@".tls$"),
none = std.math.maxInt(u32),
_,
@@ -318,7 +334,7 @@ pub const Symbol = struct {
}
pub fn symbol(sn: SectionNumber, coff: *const Coff) Symbol.Index {
- return coff.section_table.items[sn.toIndex()];
+ return coff.image_section_table.items[sn.toIndex()];
}
pub fn header(sn: SectionNumber, coff: *Coff) *std.coff.SectionHeader {
@@ -329,7 +345,6 @@ pub const Symbol = struct {
pub const Index = enum(u32) {
null,
data,
- idata,
rdata,
text,
_,
@@ -349,10 +364,6 @@ pub const Symbol = struct {
pub fn flushMoved(si: Symbol.Index, coff: *Coff) void {
const sym = si.get(coff);
sym.rva = coff.computeNodeRva(sym.ni);
- if (si == coff.entry_hack) {
- @branchHint(.unlikely);
- coff.targetStore(&coff.optionalHeaderStandardPtr().address_of_entry_point, sym.rva);
- }
si.applyLocationRelocs(coff);
si.applyTargetRelocs(coff);
}
@@ -493,6 +504,12 @@ pub const Reloc = extern struct {
@intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 9)))),
target_endian,
),
+ .SECREL => std.mem.writeInt(
+ u32,
+ loc_slice[0..4],
+ coff.computeNodeSectionOffset(target_sym.ni),
+ target_endian,
+ ),
},
.I386 => switch (reloc.type.I386) {
else => |kind| @panic(@tagName(kind)),
@@ -527,6 +544,12 @@ pub const Reloc = extern struct {
@intCast(@as(i64, @bitCast(target_rva -% (loc_sym.rva + reloc.offset + 4)))),
target_endian,
),
+ .SECREL => std.mem.writeInt(
+ u32,
+ loc_slice[0..4],
+ coff.computeNodeSectionOffset(target_sym.ni),
+ target_endian,
+ ),
},
}
}
@@ -634,7 +657,9 @@ fn create(
},
.strings = .empty,
.string_bytes = .empty,
- .section_table = .empty,
+ .image_section_table = .empty,
+ .pseudo_section_table = .empty,
+ .object_section_table = .empty,
.symbol_table = .empty,
.globals = .empty,
.global_pending_index = 0,
@@ -646,10 +671,17 @@ fn create(
}),
.pending_uavs = .empty,
.relocs = .empty,
- .entry_hack = .null,
};
errdefer coff.deinit();
+ {
+ const strings = std.enums.values(String);
+ try coff.strings.ensureTotalCapacityContext(comp.gpa, @intCast(strings.len), .{
+ .bytes = &coff.string_bytes,
+ });
+ for (strings) |string| assert(try coff.getOrPutString(@tagName(string)) == string);
+ }
+
try coff.initHeaders(
is_image,
machine,
@@ -669,7 +701,9 @@ pub fn deinit(coff: *Coff) void {
coff.import_table.entries.deinit(gpa);
coff.strings.deinit(gpa);
coff.string_bytes.deinit(gpa);
- coff.section_table.deinit(gpa);
+ coff.image_section_table.deinit(gpa);
+ coff.pseudo_section_table.deinit(gpa);
+ coff.object_section_table.deinit(gpa);
coff.symbol_table.deinit(gpa);
coff.globals.deinit(gpa);
coff.navs.deinit(gpa);
@@ -692,19 +726,21 @@ fn initHeaders(
) !void {
const comp = coff.base.comp;
const gpa = comp.gpa;
- const file_align: std.mem.Alignment = comptime .fromByteUnits(default_file_alignment);
const target_endian = coff.targetEndian();
+ const file_align: std.mem.Alignment = comptime .fromByteUnits(default_file_alignment);
const optional_header_size: u16 = if (is_image) switch (magic) {
_ => unreachable,
inline else => |ct_magic| @sizeOf(@field(std.coff.OptionalHeader, @tagName(ct_magic))),
} else 0;
const data_directories_size: u16 = if (is_image)
- @sizeOf(std.coff.ImageDataDirectory) * DataDirectory.len
+ @sizeOf(std.coff.ImageDataDirectory) * std.coff.IMAGE.DIRECTORY_ENTRY.len
else
0;
- try coff.nodes.ensureTotalCapacity(gpa, Node.known_count);
+ const expected_nodes_len = Node.known_count + 6 +
+ @as(usize, @intFromBool(comp.config.any_non_single_threaded)) * 2;
+ try coff.nodes.ensureTotalCapacity(gpa, expected_nodes_len);
coff.nodes.appendAssumeCapacity(.file);
const header_ni = Node.known.header;
@@ -762,108 +798,110 @@ fn initHeaders(
.fixed = true,
}));
coff.nodes.appendAssumeCapacity(.optional_header);
- coff.targetStore(&coff.optionalHeaderStandardPtr().magic, magic);
- if (is_image) switch (coff.optionalHeaderPtr()) {
- .PE32 => |optional_header| {
- optional_header.* = .{
- .standard = .{
- .magic = .PE32,
- .major_linker_version = 0,
- .minor_linker_version = 0,
- .size_of_code = 0,
- .size_of_initialized_data = 0,
- .size_of_uninitialized_data = 0,
- .address_of_entry_point = 0,
- .base_of_code = 0,
- },
- .base_of_data = 0,
- .image_base = switch (coff.base.comp.config.output_mode) {
- .Exe => 0x400000,
- .Lib => switch (coff.base.comp.config.link_mode) {
- .static => 0,
- .dynamic => 0x10000000,
+ if (is_image) {
+ coff.targetStore(&coff.optionalHeaderStandardPtr().magic, magic);
+ switch (coff.optionalHeaderPtr()) {
+ .PE32 => |optional_header| {
+ optional_header.* = .{
+ .standard = .{
+ .magic = .PE32,
+ .major_linker_version = 0,
+ .minor_linker_version = 0,
+ .size_of_code = 0,
+ .size_of_initialized_data = 0,
+ .size_of_uninitialized_data = 0,
+ .address_of_entry_point = 0,
+ .base_of_code = 0,
},
- .Obj => 0,
- },
- .section_alignment = @intCast(section_align.toByteUnits()),
- .file_alignment = @intCast(file_align.toByteUnits()),
- .major_operating_system_version = 6,
- .minor_operating_system_version = 0,
- .major_image_version = 0,
- .minor_image_version = 0,
- .major_subsystem_version = major_subsystem_version,
- .minor_subsystem_version = minor_subsystem_version,
- .win32_version_value = 0,
- .size_of_image = 0,
- .size_of_headers = 0,
- .checksum = 0,
- .subsystem = .WINDOWS_CUI,
- .dll_flags = .{
- .HIGH_ENTROPY_VA = true,
- .DYNAMIC_BASE = true,
- .TERMINAL_SERVER_AWARE = true,
- .NX_COMPAT = true,
- },
- .size_of_stack_reserve = default_size_of_stack_reserve,
- .size_of_stack_commit = default_size_of_stack_commit,
- .size_of_heap_reserve = default_size_of_heap_reserve,
- .size_of_heap_commit = default_size_of_heap_commit,
- .loader_flags = 0,
- .number_of_rva_and_sizes = DataDirectory.len,
- };
- if (target_endian != native_endian)
- std.mem.byteSwapAllFields(std.coff.OptionalHeader.PE32, optional_header);
- },
- .@"PE32+" => |optional_header| {
- optional_header.* = .{
- .standard = .{
- .magic = .@"PE32+",
- .major_linker_version = 0,
- .minor_linker_version = 0,
- .size_of_code = 0,
- .size_of_initialized_data = 0,
- .size_of_uninitialized_data = 0,
- .address_of_entry_point = 0,
- .base_of_code = 0,
- },
- .image_base = switch (coff.base.comp.config.output_mode) {
- .Exe => 0x140000000,
- .Lib => switch (coff.base.comp.config.link_mode) {
- .static => 0,
- .dynamic => 0x180000000,
+ .base_of_data = 0,
+ .image_base = switch (coff.base.comp.config.output_mode) {
+ .Exe => 0x400000,
+ .Lib => switch (coff.base.comp.config.link_mode) {
+ .static => 0,
+ .dynamic => 0x10000000,
+ },
+ .Obj => 0,
},
- .Obj => 0,
- },
- .section_alignment = @intCast(section_align.toByteUnits()),
- .file_alignment = @intCast(file_align.toByteUnits()),
- .major_operating_system_version = 6,
- .minor_operating_system_version = 0,
- .major_image_version = 0,
- .minor_image_version = 0,
- .major_subsystem_version = major_subsystem_version,
- .minor_subsystem_version = minor_subsystem_version,
- .win32_version_value = 0,
- .size_of_image = 0,
- .size_of_headers = 0,
- .checksum = 0,
- .subsystem = .WINDOWS_CUI,
- .dll_flags = .{
- .HIGH_ENTROPY_VA = true,
- .DYNAMIC_BASE = true,
- .TERMINAL_SERVER_AWARE = true,
- .NX_COMPAT = true,
- },
- .size_of_stack_reserve = default_size_of_stack_reserve,
- .size_of_stack_commit = default_size_of_stack_commit,
- .size_of_heap_reserve = default_size_of_heap_reserve,
- .size_of_heap_commit = default_size_of_heap_commit,
- .loader_flags = 0,
- .number_of_rva_and_sizes = DataDirectory.len,
- };
- if (target_endian != native_endian)
- std.mem.byteSwapAllFields(std.coff.OptionalHeader.@"PE32+", optional_header);
- },
- };
+ .section_alignment = @intCast(section_align.toByteUnits()),
+ .file_alignment = @intCast(file_align.toByteUnits()),
+ .major_operating_system_version = 6,
+ .minor_operating_system_version = 0,
+ .major_image_version = 0,
+ .minor_image_version = 0,
+ .major_subsystem_version = major_subsystem_version,
+ .minor_subsystem_version = minor_subsystem_version,
+ .win32_version_value = 0,
+ .size_of_image = 0,
+ .size_of_headers = 0,
+ .checksum = 0,
+ .subsystem = .WINDOWS_CUI,
+ .dll_flags = .{
+ .HIGH_ENTROPY_VA = true,
+ .DYNAMIC_BASE = true,
+ .TERMINAL_SERVER_AWARE = true,
+ .NX_COMPAT = true,
+ },
+ .size_of_stack_reserve = default_size_of_stack_reserve,
+ .size_of_stack_commit = default_size_of_stack_commit,
+ .size_of_heap_reserve = default_size_of_heap_reserve,
+ .size_of_heap_commit = default_size_of_heap_commit,
+ .loader_flags = 0,
+ .number_of_rva_and_sizes = std.coff.IMAGE.DIRECTORY_ENTRY.len,
+ };
+ if (target_endian != native_endian)
+ std.mem.byteSwapAllFields(std.coff.OptionalHeader.PE32, optional_header);
+ },
+ .@"PE32+" => |optional_header| {
+ optional_header.* = .{
+ .standard = .{
+ .magic = .@"PE32+",
+ .major_linker_version = 0,
+ .minor_linker_version = 0,
+ .size_of_code = 0,
+ .size_of_initialized_data = 0,
+ .size_of_uninitialized_data = 0,
+ .address_of_entry_point = 0,
+ .base_of_code = 0,
+ },
+ .image_base = switch (coff.base.comp.config.output_mode) {
+ .Exe => 0x140000000,
+ .Lib => switch (coff.base.comp.config.link_mode) {
+ .static => 0,
+ .dynamic => 0x180000000,
+ },
+ .Obj => 0,
+ },
+ .section_alignment = @intCast(section_align.toByteUnits()),
+ .file_alignment = @intCast(file_align.toByteUnits()),
+ .major_operating_system_version = 6,
+ .minor_operating_system_version = 0,
+ .major_image_version = 0,
+ .minor_image_version = 0,
+ .major_subsystem_version = major_subsystem_version,
+ .minor_subsystem_version = minor_subsystem_version,
+ .win32_version_value = 0,
+ .size_of_image = 0,
+ .size_of_headers = 0,
+ .checksum = 0,
+ .subsystem = .WINDOWS_CUI,
+ .dll_flags = .{
+ .HIGH_ENTROPY_VA = true,
+ .DYNAMIC_BASE = true,
+ .TERMINAL_SERVER_AWARE = true,
+ .NX_COMPAT = true,
+ },
+ .size_of_stack_reserve = default_size_of_stack_reserve,
+ .size_of_stack_commit = default_size_of_stack_commit,
+ .size_of_heap_reserve = default_size_of_heap_reserve,
+ .size_of_heap_commit = default_size_of_heap_commit,
+ .loader_flags = 0,
+ .number_of_rva_and_sizes = std.coff.IMAGE.DIRECTORY_ENTRY.len,
+ };
+ if (target_endian != native_endian)
+ std.mem.byteSwapAllFields(std.coff.OptionalHeader.@"PE32+", optional_header);
+ },
+ }
+ }
const data_directories_ni = Node.known.data_directories;
assert(data_directories_ni == try coff.mf.addLastChildNode(gpa, header_ni, .{
@@ -875,8 +913,10 @@ fn initHeaders(
{
const data_directories = coff.dataDirectorySlice();
@memset(data_directories, .{ .virtual_address = 0, .size = 0 });
- if (target_endian != native_endian)
- std.mem.byteSwapAllFields([DataDirectory.len]std.coff.ImageDataDirectory, data_directories);
+ if (target_endian != native_endian) std.mem.byteSwapAllFields(
+ [std.coff.IMAGE.DIRECTORY_ENTRY.len]std.coff.ImageDataDirectory,
+ data_directories,
+ );
}
const section_table_ni = Node.known.section_table;
@@ -902,10 +942,6 @@ fn initHeaders(
.MEM_READ = true,
.MEM_WRITE = true,
}) == .data);
- assert(try coff.addSection(".idata", .{
- .CNT_INITIALIZED_DATA = true,
- .MEM_READ = true,
- }) == .idata);
assert(try coff.addSection(".rdata", .{
.CNT_INITIALIZED_DATA = true,
.MEM_READ = true,
@@ -915,13 +951,26 @@ fn initHeaders(
.MEM_EXECUTE = true,
.MEM_READ = true,
}) == .text);
+
coff.import_table.ni = try coff.mf.addLastChildNode(
gpa,
- Symbol.Index.idata.node(coff),
- .{ .alignment = .@"4" },
+ (try coff.objectSectionMapIndex(
+ .@".idata",
+ coff.mf.flags.block_size,
+ .{ .read = true },
+ )).symbol(coff).node(coff),
+ .{ .alignment = .@"4", .moved = true },
);
coff.nodes.appendAssumeCapacity(.import_directory_table);
- assert(coff.symbol_table.items.len == Symbol.Index.known_count);
+
+    // While TLS variables allocated at runtime are writable, the template itself is not.
+ if (comp.config.any_non_single_threaded) _ = try coff.objectSectionMapIndex(
+ .@".tls$",
+ coff.mf.flags.block_size,
+ .{ .read = true },
+ );
+
+ assert(coff.nodes.len == expected_nodes_len);
}
fn getNode(coff: *const Coff, ni: MappedFile.Node.Index) Node {
@@ -938,8 +987,10 @@ fn computeNodeRva(coff: *Coff, ni: MappedFile.Node.Index) u32 {
.data_directories,
.section_table,
=> unreachable,
- .section => |si| si,
- .import_directory_table => unreachable,
+ .image_section => |si| si,
+ .import_directory_table => break :parent_rva coff.targetLoad(
+ &coff.dataDirectoryPtr(.IMPORT).virtual_address,
+ ),
.import_lookup_table => |import_index| break :parent_rva coff.targetLoad(
&coff.importDirectoryEntryPtr(import_index).import_lookup_table_rva,
),
@@ -949,13 +1000,34 @@ fn computeNodeRva(coff: *Coff, ni: MappedFile.Node.Index) u32 {
.import_hint_name_table => |import_index| break :parent_rva coff.targetLoad(
&coff.importDirectoryEntryPtr(import_index).name_rva,
),
- inline .global, .nav, .uav, .lazy_code, .lazy_const_data => |mi| mi.symbol(coff),
+ inline .pseudo_section,
+ .object_section,
+ .global,
+ .nav,
+ .uav,
+ .lazy_code,
+ .lazy_const_data,
+ => |mi| mi.symbol(coff),
};
break :parent_rva parent_si.get(coff).rva;
};
const offset, _ = ni.location(&coff.mf).resolve(&coff.mf);
return @intCast(parent_rva + offset);
}
+fn computeNodeSectionOffset(coff: *Coff, ni: MappedFile.Node.Index) u32 {
+ var section_offset: u32 = 0;
+ var parent_ni = ni;
+ while (true) {
+ const offset, _ = parent_ni.location(&coff.mf).resolve(&coff.mf);
+ section_offset += @intCast(offset);
+ parent_ni = parent_ni.parent(&coff.mf);
+ switch (coff.getNode(parent_ni)) {
+ else => unreachable,
+ .image_section, .pseudo_section => return section_offset,
+ .object_section => {},
+ }
+ }
+}
pub inline fn targetEndian(_: *const Coff) std.builtin.Endian {
return .little;
@@ -1021,11 +1093,16 @@ pub fn optionalHeaderField(
};
}
-pub fn dataDirectorySlice(coff: *Coff) *[DataDirectory.len]std.coff.ImageDataDirectory {
+pub fn dataDirectorySlice(
+ coff: *Coff,
+) *[std.coff.IMAGE.DIRECTORY_ENTRY.len]std.coff.ImageDataDirectory {
return @ptrCast(@alignCast(Node.known.data_directories.slice(&coff.mf)));
}
-pub fn dataDirectoryPtr(coff: *Coff, data_directory: DataDirectory) *std.coff.ImageDataDirectory {
- return &coff.dataDirectorySlice()[@intFromEnum(data_directory)];
+pub fn dataDirectoryPtr(
+ coff: *Coff,
+ entry: std.coff.IMAGE.DIRECTORY_ENTRY,
+) *std.coff.ImageDataDirectory {
+ return &coff.dataDirectorySlice()[@intFromEnum(entry)];
}
pub fn sectionTableSlice(coff: *Coff) []std.coff.SectionHeader {
@@ -1060,13 +1137,22 @@ fn initSymbolAssumeCapacity(coff: *Coff) !Symbol.Index {
}
fn getOrPutString(coff: *Coff, string: []const u8) !String {
+ try coff.ensureUnusedStringCapacity(string.len);
+ return coff.getOrPutStringAssumeCapacity(string);
+}
+fn getOrPutOptionalString(coff: *Coff, string: ?[]const u8) !String.Optional {
+ return (try coff.getOrPutString(string orelse return .none)).toOptional();
+}
+
+fn ensureUnusedStringCapacity(coff: *Coff, len: usize) !void {
const gpa = coff.base.comp.gpa;
- try coff.string_bytes.ensureUnusedCapacity(gpa, string.len + 1);
- const gop = try coff.strings.getOrPutContextAdapted(
- gpa,
+ try coff.strings.ensureUnusedCapacityContext(gpa, 1, .{ .bytes = &coff.string_bytes });
+ try coff.string_bytes.ensureUnusedCapacity(gpa, len + 1);
+}
+fn getOrPutStringAssumeCapacity(coff: *Coff, string: []const u8) String {
+ const gop = coff.strings.getOrPutAssumeCapacityAdapted(
string,
std.hash_map.StringIndexAdapter{ .bytes = &coff.string_bytes },
- .{ .bytes = &coff.string_bytes },
);
if (!gop.found_existing) {
gop.key_ptr.* = @intCast(coff.string_bytes.items.len);
@@ -1077,10 +1163,6 @@ fn getOrPutString(coff: *Coff, string: []const u8) !String {
return @enumFromInt(gop.key_ptr.*);
}
-fn getOrPutOptionalString(coff: *Coff, string: ?[]const u8) !String.Optional {
- return (try coff.getOrPutString(string orelse return .none)).toOptional();
-}
-
pub fn globalSymbol(coff: *Coff, name: []const u8, lib_name: ?[]const u8) !Symbol.Index {
const gpa = coff.base.comp.gpa;
try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
@@ -1095,6 +1177,43 @@ pub fn globalSymbol(coff: *Coff, name: []const u8, lib_name: ?[]const u8) !Symbo
return sym_gop.value_ptr.*;
}
+fn navSection(
+ coff: *Coff,
+ zcu: *Zcu,
+ nav_fr: @FieldType(@FieldType(InternPool.Nav, "status"), "fully_resolved"),
+) !Symbol.Index {
+ const ip = &zcu.intern_pool;
+ const default: String, const attributes: ObjectSectionAttributes =
+ switch (ip.indexToKey(nav_fr.val)) {
+ else => .{ .@".rdata", .{ .read = true } },
+ .variable => |variable| if (variable.is_threadlocal and
+ coff.base.comp.config.any_non_single_threaded)
+ .{ .@".tls$", .{ .read = true, .write = true } }
+ else
+ .{ .@".data", .{ .read = true, .write = true } },
+ .@"extern" => |@"extern"| if (@"extern".is_threadlocal and
+ coff.base.comp.config.any_non_single_threaded)
+ .{ .@".tls$", .{ .read = true, .write = true } }
+ else if (ip.isFunctionType(@"extern".ty))
+ .{ .@".text", .{ .read = true, .execute = true } }
+ else if (@"extern".is_const)
+ .{ .@".rdata", .{ .read = true } }
+ else
+ .{ .@".data", .{ .read = true, .write = true } },
+ .func => .{ .@".text", .{ .read = true, .execute = true } },
+ };
+ return (try coff.objectSectionMapIndex(
+ (try coff.getOrPutOptionalString(nav_fr.@"linksection".toSlice(ip))).unwrap() orelse default,
+ switch (nav_fr.@"linksection") {
+ .none => coff.mf.flags.block_size,
+ else => switch (nav_fr.alignment) {
+ .none => Type.fromInterned(ip.typeOf(nav_fr.val)).abiAlignment(zcu),
+ else => |alignment| alignment,
+ }.toStdMem(),
+ },
+ attributes,
+ )).symbol(coff);
+}
fn navMapIndex(coff: *Coff, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
const gpa = zcu.gpa;
try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
@@ -1171,7 +1290,7 @@ pub fn getVAddr(coff: *Coff, reloc_info: link.File.RelocInfo, target_si: Symbol.
fn addSection(coff: *Coff, name: []const u8, flags: std.coff.SectionHeader.Flags) !Symbol.Index {
const gpa = coff.base.comp.gpa;
try coff.nodes.ensureUnusedCapacity(gpa, 1);
- try coff.section_table.ensureUnusedCapacity(gpa, 1);
+ try coff.image_section_table.ensureUnusedCapacity(gpa, 1);
try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
const coff_header = coff.headerPtr();
@@ -1189,13 +1308,13 @@ fn addSection(coff: *Coff, name: []const u8, flags: std.coff.SectionHeader.Flags
.bubbles_moved = false,
});
const si = coff.addSymbolAssumeCapacity();
- coff.section_table.appendAssumeCapacity(si);
- coff.nodes.appendAssumeCapacity(.{ .section = si });
+ coff.image_section_table.appendAssumeCapacity(si);
+ coff.nodes.appendAssumeCapacity(.{ .image_section = si });
const section_table = coff.sectionTableSlice();
const virtual_size = coff.optionalHeaderField(.section_alignment);
const rva: u32 = switch (section_index) {
0 => @intCast(Node.known.header.location(&coff.mf).resolve(&coff.mf)[1]),
- else => coff.section_table.items[section_index - 1].get(coff).rva +
+ else => coff.image_section_table.items[section_index - 1].get(coff).rva +
coff.targetLoad(§ion_table[section_index - 1].virtual_size),
};
{
@@ -1230,6 +1349,99 @@ fn addSection(coff: *Coff, name: []const u8, flags: std.coff.SectionHeader.Flags
return si;
}
+const ObjectSectionAttributes = packed struct {
+ read: bool = false,
+ write: bool = false,
+ execute: bool = false,
+ shared: bool = false,
+ nopage: bool = false,
+ nocache: bool = false,
+ discard: bool = false,
+ remove: bool = false,
+};
+fn pseudoSectionMapIndex(
+ coff: *Coff,
+ name: String,
+ alignment: std.mem.Alignment,
+ attributes: ObjectSectionAttributes,
+) !Node.PseudoSectionMapIndex {
+ const gpa = coff.base.comp.gpa;
+ const pseudo_section_gop = try coff.pseudo_section_table.getOrPut(gpa, name);
+ const psmi: Node.PseudoSectionMapIndex = @enumFromInt(pseudo_section_gop.index);
+ if (!pseudo_section_gop.found_existing) {
+ const parent: Symbol.Index = if (attributes.execute)
+ .text
+ else if (attributes.write)
+ .data
+ else
+ .rdata;
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+ const ni = try coff.mf.addLastChildNode(gpa, parent.node(coff), .{ .alignment = alignment });
+ const si = coff.addSymbolAssumeCapacity();
+ pseudo_section_gop.value_ptr.* = si;
+ const sym = si.get(coff);
+ sym.ni = ni;
+ sym.rva = coff.computeNodeRva(ni);
+ sym.section_number = parent.get(coff).section_number;
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ coff.nodes.appendAssumeCapacity(.{ .pseudo_section = psmi });
+ }
+ return psmi;
+}
+fn objectSectionMapIndex(
+ coff: *Coff,
+ name: String,
+ alignment: std.mem.Alignment,
+ attributes: ObjectSectionAttributes,
+) !Node.ObjectSectionMapIndex {
+ const gpa = coff.base.comp.gpa;
+ const object_section_gop = try coff.object_section_table.getOrPut(gpa, name);
+ const osmi: Node.ObjectSectionMapIndex = @enumFromInt(object_section_gop.index);
+ if (!object_section_gop.found_existing) {
+ try coff.ensureUnusedStringCapacity(name.toSlice(coff).len);
+ const name_slice = name.toSlice(coff);
+ const parent = (try coff.pseudoSectionMapIndex(coff.getOrPutStringAssumeCapacity(
+ name_slice[0 .. std.mem.indexOfScalar(u8, name_slice, '$') orelse name_slice.len],
+ ), alignment, attributes)).symbol(coff);
+ try coff.nodes.ensureUnusedCapacity(gpa, 1);
+ try coff.symbol_table.ensureUnusedCapacity(gpa, 1);
+ const parent_ni = parent.node(coff);
+ var prev_ni: MappedFile.Node.Index = .none;
+ var next_it = parent_ni.children(&coff.mf);
+ while (next_it.next()) |next_ni| switch (std.mem.order(
+ u8,
+ name_slice,
+ coff.getNode(next_ni).object_section.name(coff).toSlice(coff),
+ )) {
+ .lt => break,
+ .eq => unreachable,
+ .gt => prev_ni = next_ni,
+ };
+ const ni = switch (prev_ni) {
+ .none => try coff.mf.addFirstChildNode(gpa, parent_ni, .{
+ .alignment = alignment,
+ .fixed = true,
+ }),
+ else => try coff.mf.addNodeAfter(gpa, prev_ni, .{
+ .alignment = alignment,
+ .fixed = true,
+ }),
+ };
+ const si = coff.addSymbolAssumeCapacity();
+ object_section_gop.value_ptr.* = si;
+ const sym = si.get(coff);
+ sym.ni = ni;
+ sym.rva = coff.computeNodeRva(ni);
+ sym.section_number = parent.get(coff).section_number;
+ assert(sym.loc_relocs == .none);
+ sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
+ coff.nodes.appendAssumeCapacity(.{ .object_section = osmi });
+ }
+ return osmi;
+}
+
pub fn addReloc(
coff: *Coff,
loc_si: Symbol.Index,
@@ -1279,53 +1491,73 @@ fn updateNavInner(coff: *Coff, pt: Zcu.PerThread, nav_index: InternPool.Nav.Inde
const nav = ip.getNav(nav_index);
const nav_val = nav.status.fully_resolved.val;
- const nav_init, const is_threadlocal = switch (ip.indexToKey(nav_val)) {
- else => .{ nav_val, false },
- .variable => |variable| .{ variable.init, variable.is_threadlocal },
- .@"extern" => return,
- .func => .{ .none, false },
+ const nav_init = switch (ip.indexToKey(nav_val)) {
+ else => nav_val,
+ .variable => |variable| variable.init,
+ .@"extern", .func => .none,
};
if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
const nmi = try coff.navMapIndex(zcu, nav_index);
const si = nmi.symbol(coff);
const ni = ni: {
- const sym = si.get(coff);
- switch (sym.ni) {
+ switch (si.get(coff).ni) {
.none => {
+ const sec_si = try coff.navSection(zcu, nav.status.fully_resolved);
try coff.nodes.ensureUnusedCapacity(gpa, 1);
- _ = is_threadlocal;
- const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.data.node(coff), .{
+ const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
.alignment = pt.navAlignment(nav_index).toStdMem(),
.moved = true,
});
coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
+ const sym = si.get(coff);
sym.ni = ni;
- sym.section_number = Symbol.Index.data.get(coff).section_number;
+ sym.section_number = sec_si.get(coff).section_number;
},
else => si.deleteLocationRelocs(coff),
}
+ const sym = si.get(coff);
assert(sym.loc_relocs == .none);
sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
break :ni sym.ni;
};
- var nw: MappedFile.Node.Writer = undefined;
- ni.writer(&coff.mf, gpa, &nw);
- defer nw.deinit();
- codegen.generateSymbol(
- &coff.base,
- pt,
- zcu.navSrcLoc(nav_index),
- .fromInterned(nav_init),
- &nw.interface,
- .{ .atom_index = @intFromEnum(si) },
- ) catch |err| switch (err) {
- error.WriteFailed => return error.OutOfMemory,
- else => |e| return e,
- };
- si.get(coff).size = @intCast(nw.interface.end);
- si.applyLocationRelocs(coff);
+ {
+ var nw: MappedFile.Node.Writer = undefined;
+ ni.writer(&coff.mf, gpa, &nw);
+ defer nw.deinit();
+ codegen.generateSymbol(
+ &coff.base,
+ pt,
+ zcu.navSrcLoc(nav_index),
+ .fromInterned(nav_init),
+ &nw.interface,
+ .{ .atom_index = @intFromEnum(si) },
+ ) catch |err| switch (err) {
+ error.WriteFailed => return error.OutOfMemory,
+ else => |e| return e,
+ };
+ si.get(coff).size = @intCast(nw.interface.end);
+ si.applyLocationRelocs(coff);
+ }
+
+ if (nav.status.fully_resolved.@"linksection".unwrap()) |_| {
+ try ni.resize(&coff.mf, gpa, si.get(coff).size);
+ var parent_ni = ni;
+ while (true) {
+ parent_ni = parent_ni.parent(&coff.mf);
+ switch (coff.getNode(parent_ni)) {
+ else => unreachable,
+ .image_section, .pseudo_section => break,
+ .object_section => {
+ var child_it = parent_ni.reverseChildren(&coff.mf);
+ const last_offset, const last_size =
+ child_it.next().?.location(&coff.mf).resolve(&coff.mf);
+ try parent_ni.resize(&coff.mf, gpa, last_offset + last_size);
+ },
+ }
+ }
+ }
}
pub fn lowerUav(
@@ -1394,13 +1626,13 @@ fn updateFuncInner(
const si = nmi.symbol(coff);
log.debug("updateFunc({f}) = {d}", .{ nav.fqn.fmt(ip), si });
const ni = ni: {
- const sym = si.get(coff);
- switch (sym.ni) {
+ switch (si.get(coff).ni) {
.none => {
+ const sec_si = try coff.navSection(zcu, nav.status.fully_resolved);
try coff.nodes.ensureUnusedCapacity(gpa, 1);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
const target = &mod.resolved_target.result;
- const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.text.node(coff), .{
+ const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
.alignment = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug,
@@ -1414,11 +1646,13 @@ fn updateFuncInner(
.moved = true,
});
coff.nodes.appendAssumeCapacity(.{ .nav = nmi });
+ const sym = si.get(coff);
sym.ni = ni;
- sym.section_number = Symbol.Index.text.get(coff).section_number;
+ sym.section_number = sec_si.get(coff).section_number;
},
else => si.deleteLocationRelocs(coff),
}
+ const sym = si.get(coff);
assert(sym.loc_relocs == .none);
sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
break :ni sym.ni;
@@ -1492,11 +1726,8 @@ pub fn idle(coff: *Coff, tid: Zcu.PerThread.Id) !bool {
const comp = coff.base.comp;
task: {
while (coff.pending_uavs.pop()) |pending_uav| {
- const sub_prog_node = coff.idleProgNode(
- tid,
- comp.link_const_prog_node,
- .{ .uav = pending_uav.key },
- );
+ const sub_prog_node =
+ coff.idleProgNode(tid, comp.link_const_prog_node, .{ .uav = pending_uav.key });
defer sub_prog_node.end();
coff.flushUav(
.{ .zcu = coff.base.comp.zcu.?, .tid = tid },
@@ -1561,7 +1792,8 @@ pub fn idle(coff: *Coff, tid: Zcu.PerThread.Id) !bool {
const clean_moved = ni.cleanMoved(&coff.mf);
const clean_resized = ni.cleanResized(&coff.mf);
if (clean_moved or clean_resized) {
- const sub_prog_node = coff.idleProgNode(tid, coff.mf.update_prog_node, coff.getNode(ni));
+ const sub_prog_node =
+ coff.idleProgNode(tid, coff.mf.update_prog_node, coff.getNode(ni));
defer sub_prog_node.end();
if (clean_moved) try coff.flushMoved(ni);
if (clean_resized) try coff.flushResized(ni);
@@ -1584,7 +1816,9 @@ fn idleProgNode(
var name: [std.Progress.Node.max_name_len]u8 = undefined;
return prog_node.start(name: switch (node) {
else => |tag| @tagName(tag),
- .section => |si| std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
+ .image_section => |si| std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
+ inline .pseudo_section, .object_section => |smi| smi.name(coff).toSlice(coff),
+ .global => |gmi| gmi.globalName(coff).name.toSlice(coff),
.nav => |nmi| {
const ip = &coff.base.comp.zcu.?.intern_pool;
break :name ip.getNav(nmi.navIndex(coff)).fqn.toSlice(ip);
@@ -1611,23 +1845,30 @@ fn flushUav(
const uav_val = umi.uavValue(coff);
const si = umi.symbol(coff);
const ni = ni: {
- const sym = si.get(coff);
- switch (sym.ni) {
+ switch (si.get(coff).ni) {
.none => {
+ const sec_si = (try coff.objectSectionMapIndex(
+ .@".rdata",
+ coff.mf.flags.block_size,
+ .{ .read = true },
+ )).symbol(coff);
try coff.nodes.ensureUnusedCapacity(gpa, 1);
- const ni = try coff.mf.addLastChildNode(gpa, Symbol.Index.data.node(coff), .{
+ const sym = si.get(coff);
+ const ni = try coff.mf.addLastChildNode(gpa, sec_si.node(coff), .{
.alignment = uav_align.toStdMem(),
.moved = true,
});
coff.nodes.appendAssumeCapacity(.{ .uav = umi });
sym.ni = ni;
- sym.section_number = Symbol.Index.data.get(coff).section_number;
+ sym.section_number = sec_si.get(coff).section_number;
},
else => {
- if (sym.ni.alignment(&coff.mf).order(uav_align.toStdMem()).compare(.gte)) return;
+ if (si.get(coff).ni.alignment(&coff.mf).order(uav_align.toStdMem()).compare(.gte))
+ return;
si.deleteLocationRelocs(coff);
},
}
+ const sym = si.get(coff);
assert(sym.loc_relocs == .none);
sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
break :ni sym.ni;
@@ -1684,7 +1925,7 @@ fn flushGlobal(coff: *Coff, pt: Zcu.PerThread, gmi: Node.GlobalMapIndex) !void {
);
const import_hint_name_table_len =
import_hint_name_align.forward(lib_name.len + ".dll".len + 1);
- const idata_section_ni = Symbol.Index.idata.node(coff);
+ const idata_section_ni = coff.import_table.ni.parent(&coff.mf);
const import_lookup_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
.size = addr_size * 2,
.alignment = addr_align,
@@ -1701,7 +1942,8 @@ fn flushGlobal(coff: *Coff, pt: Zcu.PerThread, gmi: Node.GlobalMapIndex) !void {
import_address_table_sym.ni = import_address_table_ni;
assert(import_address_table_sym.loc_relocs == .none);
import_address_table_sym.loc_relocs = @enumFromInt(coff.relocs.items.len);
- import_address_table_sym.section_number = Symbol.Index.idata.get(coff).section_number;
+ import_address_table_sym.section_number =
+ coff.getNode(idata_section_ni).object_section.symbol(coff).get(coff).section_number;
}
const import_hint_name_table_ni = try coff.mf.addLastChildNode(gpa, idata_section_ni, .{
.size = import_hint_name_table_len,
@@ -1873,12 +2115,12 @@ fn flushMoved(coff: *Coff, ni: MappedFile.Node.Index) !void {
.data_directories,
.section_table,
=> unreachable,
- .section => |si| return coff.targetStore(
+ .image_section => |si| return coff.targetStore(
&si.get(coff).section_number.header(coff).pointer_to_raw_data,
@intCast(ni.fileLocation(&coff.mf, false).offset),
),
.import_directory_table => coff.targetStore(
- &coff.dataDirectoryPtr(.import_table).virtual_address,
+ &coff.dataDirectoryPtr(.IMPORT).virtual_address,
coff.computeNodeRva(ni),
),
.import_lookup_table => |import_index| coff.targetStore(
@@ -1939,7 +2181,9 @@ fn flushMoved(coff: *Coff, ni: MappedFile.Node.Index) !void {
import_hint_name_index += 2;
}
},
- inline .global,
+ inline .pseudo_section,
+ .object_section,
+ .global,
.nav,
.uav,
.lazy_code,
@@ -1960,7 +2204,7 @@ fn flushResized(coff: *Coff, ni: MappedFile.Node.Index) !void {
@intCast(size),
),
}
- if (size > coff.section_table.items[0].get(coff).rva) try coff.virtualSlide(
+ if (size > coff.image_section_table.items[0].get(coff).rva) try coff.virtualSlide(
0,
std.mem.alignForward(
u32,
@@ -1971,7 +2215,7 @@ fn flushResized(coff: *Coff, ni: MappedFile.Node.Index) !void {
},
.signature, .coff_header, .optional_header, .data_directories => unreachable,
.section_table => {},
- .section => |si| {
+ .image_section => |si| {
const sym = si.get(coff);
const section_index = sym.section_number.toIndex();
const section = &coff.sectionTableSlice()[section_index];
@@ -1987,24 +2231,20 @@ fn flushResized(coff: *Coff, ni: MappedFile.Node.Index) !void {
}
},
.import_directory_table => coff.targetStore(
- &coff.dataDirectoryPtr(.import_table).size,
+ &coff.dataDirectoryPtr(.IMPORT).size,
@intCast(size),
),
- .import_lookup_table,
- .import_address_table,
- .import_hint_name_table,
- .global,
- .nav,
- .uav,
- .lazy_code,
- .lazy_const_data,
- => {},
+ .import_lookup_table, .import_address_table, .import_hint_name_table => {},
+ inline .pseudo_section,
+ .object_section,
+ => |smi| smi.symbol(coff).get(coff).size = @intCast(size),
+ .global, .nav, .uav, .lazy_code, .lazy_const_data => {},
}
}
fn virtualSlide(coff: *Coff, start_section_index: usize, start_rva: u32) !void {
var rva = start_rva;
for (
- coff.section_table.items[start_section_index..],
+ coff.image_section_table.items[start_section_index..],
coff.sectionTableSlice()[start_section_index..],
) |section_si, *section| {
const section_sym = section_si.get(coff);
@@ -2078,8 +2318,12 @@ fn updateExportsInner(
export_sym.section_number = exported_sym.section_number;
export_si.applyTargetRelocs(coff);
if (@"export".opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
- coff.entry_hack = exported_si;
coff.optionalHeaderStandardPtr().address_of_entry_point = exported_sym.rva;
+ } else if (@"export".opts.name.eqlSlice("_tls_used", ip)) {
+ const tls_directory = coff.dataDirectoryPtr(.TLS);
+ tls_directory.* = .{ .virtual_address = exported_sym.rva, .size = exported_sym.size };
+ if (coff.targetEndian() != native_endian)
+ std.mem.byteSwapAllFields(std.coff.ImageDataDirectory, tls_directory);
}
}
}
@@ -2108,7 +2352,7 @@ pub fn printNode(
try w.writeAll(@tagName(node));
switch (node) {
else => {},
- .section => |si| try w.print("({s})", .{
+ .image_section => |si| try w.print("({s})", .{
std.mem.sliceTo(&si.get(coff).section_number.header(coff).name, 0),
}),
.import_lookup_table,
@@ -2117,6 +2361,9 @@ pub fn printNode(
=> |import_index| try w.print("({s})", .{
std.mem.sliceTo(import_index.get(coff).import_hint_name_table_ni.sliceConst(&coff.mf), 0),
}),
+ inline .pseudo_section, .object_section => |smi| try w.print("({s})", .{
+ smi.name(coff).toSlice(coff),
+ }),
.global => |gmi| {
const gn = gmi.globalName(coff);
try w.writeByte('(');
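The new object_section layer follows the COFF grouped-section convention: a contribution named like .tls$xyz is merged into the output section named by everything before the $, with siblings kept in lexical order (hence the sorted insertion via addFirstChildNode/addNodeAfter in objectSectionMapIndex). A minimal sketch of the name split under that same assumption:

const std = @import("std");

// ".tls$" and ".tls$ZZZ" both parent under ".tls"; names without '$' map to themselves.
fn pseudoSectionName(object_section_name: []const u8) []const u8 {
    const dollar = std.mem.indexOfScalar(u8, object_section_name, '$');
    return object_section_name[0 .. dollar orelse object_section_name.len];
}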
src/link/Elf2.zig
@@ -546,7 +546,7 @@ fn initHeaders(
break :phndx phnum;
} else undefined;
- const expected_nodes_len = 15;
+ const expected_nodes_len = 5 + phnum * 2;
try elf.nodes.ensureTotalCapacity(gpa, expected_nodes_len);
try elf.phdrs.resize(gpa, phnum);
elf.nodes.appendAssumeCapacity(.file);
@@ -808,25 +808,6 @@ fn initHeaders(
Symbol.Index.shstrtab.node(elf).slice(&elf.mf)[0] = 0;
Symbol.Index.strtab.node(elf).slice(&elf.mf)[0] = 0;
- if (maybe_interp) |interp| {
- try elf.nodes.ensureUnusedCapacity(gpa, 1);
- const interp_ni = try elf.mf.addLastChildNode(gpa, Node.Known.rodata, .{
- .size = interp.len + 1,
- .moved = true,
- .resized = true,
- });
- elf.nodes.appendAssumeCapacity(.{ .segment = interp_phndx });
- elf.phdrs.items[interp_phndx] = interp_ni;
-
- const sec_interp_si = try elf.addSection(interp_ni, .{
- .name = ".interp",
- .size = @intCast(interp.len + 1),
- .flags = .{ .ALLOC = true },
- });
- const sec_interp = sec_interp_si.node(elf).slice(&elf.mf);
- @memcpy(sec_interp[0..interp.len], interp);
- sec_interp[interp.len] = 0;
- }
assert(try elf.addSection(Node.Known.rodata, .{
.name = ".rodata",
.flags = .{ .ALLOC = true },
@@ -857,6 +838,25 @@ fn initHeaders(
.addralign = elf.mf.flags.block_size,
}) == .tdata);
}
+ if (maybe_interp) |interp| {
+ try elf.nodes.ensureUnusedCapacity(gpa, 1);
+ const interp_ni = try elf.mf.addLastChildNode(gpa, Node.Known.rodata, .{
+ .size = interp.len + 1,
+ .moved = true,
+ .resized = true,
+ });
+ elf.nodes.appendAssumeCapacity(.{ .segment = interp_phndx });
+ elf.phdrs.items[interp_phndx] = interp_ni;
+
+ const sec_interp_si = try elf.addSection(interp_ni, .{
+ .name = ".interp",
+ .size = @intCast(interp.len + 1),
+ .flags = .{ .ALLOC = true },
+ });
+ const sec_interp = sec_interp_si.node(elf).slice(&elf.mf);
+ @memcpy(sec_interp[0..interp.len], interp);
+ sec_interp[interp.len] = 0;
+ }
assert(elf.nodes.len == expected_nodes_len);
}
@@ -1072,6 +1072,32 @@ fn navType(
},
};
}
+fn navSection(
+ elf: *Elf,
+ ip: *const InternPool,
+ nav_fr: @FieldType(@FieldType(InternPool.Nav, "status"), "fully_resolved"),
+) Symbol.Index {
+ if (nav_fr.@"linksection".toSlice(ip)) |@"linksection"| {
+ if (std.mem.eql(u8, @"linksection", ".rodata") or
+ std.mem.startsWith(u8, @"linksection", ".rodata.")) return .rodata;
+ if (std.mem.eql(u8, @"linksection", ".text") or
+ std.mem.startsWith(u8, @"linksection", ".text.")) return .text;
+ if (std.mem.eql(u8, @"linksection", ".data") or
+ std.mem.startsWith(u8, @"linksection", ".data.")) return .data;
+ if (std.mem.eql(u8, @"linksection", ".tdata") or
+ std.mem.startsWith(u8, @"linksection", ".tdata.")) return .tdata;
+ }
+ return switch (navType(
+ ip,
+ .{ .fully_resolved = nav_fr },
+ elf.base.comp.config.any_non_single_threaded,
+ )) {
+ else => unreachable,
+ .FUNC => .text,
+ .OBJECT => .data,
+ .TLS => .tdata,
+ };
+}
fn navMapIndex(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@@ -1312,18 +1338,16 @@ pub fn updateNav(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
};
}
fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
- const comp = elf.base.comp;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const nav_val = nav.status.fully_resolved.val;
- const nav_init, const is_threadlocal = switch (ip.indexToKey(nav_val)) {
- else => .{ nav_val, false },
- .variable => |variable| .{ variable.init, variable.is_threadlocal },
- .@"extern" => return,
- .func => .{ .none, false },
+ const nav_init = switch (ip.indexToKey(nav_val)) {
+ else => nav_val,
+ .variable => |variable| variable.init,
+ .@"extern", .func => .none,
};
if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
@@ -1334,8 +1358,7 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
switch (sym.ni) {
.none => {
try elf.nodes.ensureUnusedCapacity(gpa, 1);
- const sec_si: Symbol.Index =
- if (is_threadlocal and comp.config.any_non_single_threaded) .tdata else .data;
+ const sec_si = elf.navSection(ip, nav.status.fully_resolved);
const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{
.alignment = pt.navAlignment(nav_index).toStdMem(),
.moved = true,
@@ -1452,9 +1475,10 @@ fn updateFuncInner(
switch (sym.ni) {
.none => {
try elf.nodes.ensureUnusedCapacity(gpa, 1);
+ const sec_si = elf.navSection(ip, nav.status.fully_resolved);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
const target = &mod.resolved_target.result;
- const ni = try elf.mf.addLastChildNode(gpa, Symbol.Index.text.node(elf), .{
+ const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{
.alignment = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug,
@@ -1471,7 +1495,7 @@ fn updateFuncInner(
sym.ni = ni;
switch (elf.symPtr(si)) {
inline else => |sym_ptr, class| sym_ptr.shndx =
- @field(elf.symPtr(.text), @tagName(class)).shndx,
+ @field(elf.symPtr(sec_si), @tagName(class)).shndx,
}
},
else => si.deleteLocationRelocs(elf),
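navSection resolves an explicit linksection by exact name or dotted prefix against the four known outputs, and otherwise falls back on the symbol type from navType (so a threadlocal still needs any_non_single_threaded to reach .tdata). A usage sketch with hypothetical declarations showing where each symbol lands:

var counters: [4]u32 linksection(".data.counters") = .{ 0, 0, 0, 0 }; // prefix ".data." -> .data
const magic: u32 linksection(".rodata") = 0x1234; // exact ".rodata" -> .rodata
threadlocal var tls_count: u32 = 0; // no linksection: navType yields TLS -> .tdata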
src/link/MappedFile.zig
@@ -14,6 +14,8 @@ updates: std.ArrayList(Node.Index),
update_prog_node: std.Progress.Node,
writers: std.SinglyLinkedList,
+pub const growth_factor = 4;
+
pub const Error = std.posix.MMapError ||
std.posix.MRemapError ||
std.fs.File.SetEndPosError ||
@@ -64,6 +66,7 @@ pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
assert(try mf.addNode(gpa, .{
.add_node = .{
.size = size,
+ .alignment = mf.flags.block_size,
.fixed = true,
},
}) == Node.Index.root);
@@ -153,20 +156,24 @@ pub const Node = extern struct {
return ni.get(mf).parent;
}
- pub const ChildIterator = struct {
- mf: *const MappedFile,
- ni: Node.Index,
-
- pub fn next(it: *ChildIterator) ?Node.Index {
- const ni = it.ni;
- if (ni == .none) return null;
- it.ni = ni.get(it.mf).next;
- return ni;
- }
- };
- pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator {
+ pub fn ChildIterator(comptime direction: enum { prev, next }) type {
+ return struct {
+ mf: *const MappedFile,
+ ni: Node.Index,
+ pub fn next(it: *@This()) ?Node.Index {
+ const ni = it.ni;
+ if (ni == .none) return null;
+ it.ni = @field(ni.get(it.mf), @tagName(direction));
+ return ni;
+ }
+ };
+ }
+ pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator(.next) {
return .{ .mf = mf, .ni = ni.get(mf).first };
}
+ pub fn reverseChildren(ni: Node.Index, mf: *const MappedFile) ChildIterator(.prev) {
+ return .{ .mf = mf, .ni = ni.get(mf).last };
+ }
pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
var child_ni = ni.get(mf).last;
@@ -274,7 +281,8 @@ pub const Node = extern struct {
if (set_has_content) parent_node.flags.has_content = true;
if (parent_ni == .none) break;
parent_ni = parent_node.parent;
- offset += parent_ni.location(mf).resolve(mf)[0];
+ const parent_offset, _ = parent_ni.location(mf).resolve(mf);
+ offset += parent_offset;
}
return .{ .offset = offset, .size = size };
}
@@ -428,7 +436,7 @@ pub const Node = extern struct {
const total_capacity = interface.end + unused_capacity;
if (interface.buffer.len >= total_capacity) return;
const w: *Writer = @fieldParentPtr("interface", interface);
- w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / 2) catch |err| {
+ w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / growth_factor) catch |err| {
w.err = err;
return error.WriteFailed;
};
@@ -487,7 +495,8 @@ fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
free_node.flags.moved = false;
free_node.flags.resized = false;
}
- if (offset > opts.parent.location(mf).resolve(mf)[1]) try opts.parent.resize(mf, gpa, offset);
+ _, const parent_size = opts.parent.location(mf).resolve(mf);
+ if (offset > parent_size) try opts.parent.resize(mf, gpa, offset);
try free_ni.resize(mf, gpa, opts.add_node.size);
}
if (opts.add_node.moved) free_ni.movedAssumeCapacity(mf);
@@ -522,6 +531,27 @@ pub fn addOnlyChildNode(
return ni;
}
+pub fn addFirstChildNode(
+ mf: *MappedFile,
+ gpa: std.mem.Allocator,
+ parent_ni: Node.Index,
+ opts: AddNodeOptions,
+) !Node.Index {
+ try mf.nodes.ensureUnusedCapacity(gpa, 1);
+ const parent = parent_ni.get(mf);
+ const ni = try mf.addNode(gpa, .{
+ .parent = parent_ni,
+ .next = parent.first,
+ .add_node = opts,
+ });
+ switch (parent.first) {
+ .none => parent.last = ni,
+ else => |first_ni| first_ni.get(mf).prev = ni,
+ }
+ parent.first = ni;
+ return ni;
+}
+
pub fn addLastChildNode(
mf: *MappedFile,
gpa: std.mem.Allocator,
@@ -577,7 +607,7 @@ pub fn addNodeAfter(
fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
const node = ni.get(mf);
- var old_offset, const old_size = node.location().resolve(mf);
+ const old_offset, const old_size = node.location().resolve(mf);
const new_size = node.flags.alignment.forward(@intCast(requested_size));
// Resize the entire file
if (ni == Node.Index.root) {
@@ -587,169 +617,238 @@ fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
- while (true) {
- const parent = node.parent.get(mf);
- _, const old_parent_size = parent.location().resolve(mf);
- const trailing_end = switch (node.next) {
- .none => parent.location().resolve(mf)[1],
- else => |next_ni| next_ni.location(mf).resolve(mf)[0],
- };
- assert(old_offset + old_size <= trailing_end);
- // Expand the node into available trailing free space
- if (old_offset + new_size <= trailing_end) {
- try mf.ensureCapacityForSetLocation(gpa);
- ni.setLocationAssumeCapacity(mf, old_offset, new_size);
- return;
- }
- // Ask the filesystem driver to insert an extent into the file without copying any data
- if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
- node.flags.alignment.order(mf.flags.block_size).compare(.gte))
- insert_range: {
- const last_offset, const last_size = parent.last.location(mf).resolve(mf);
- const last_end = last_offset + last_size;
- assert(last_end <= old_parent_size);
- const range_size =
- node.flags.alignment.forward(@intCast(requested_size +| requested_size / 2)) - old_size;
- const new_parent_size = last_end + range_size;
- if (new_parent_size > old_parent_size) {
- try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
- continue;
- }
- const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
- while (true) switch (linux.E.init(linux.fallocate(
+ const parent = node.parent.get(mf);
+ _, var old_parent_size = parent.location().resolve(mf);
+ const trailing_end = trailing_end: switch (node.next) {
+ .none => old_parent_size,
+ else => |next_ni| {
+ const next_offset, _ = next_ni.location(mf).resolve(mf);
+ break :trailing_end next_offset;
+ },
+ };
+ assert(old_offset + old_size <= trailing_end);
+ if (old_offset + new_size <= trailing_end) {
+ // Expand the node into trailing free space
+ try mf.ensureCapacityForSetLocation(gpa);
+ ni.setLocationAssumeCapacity(mf, old_offset, new_size);
+ return;
+ }
+ if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
+ node.flags.alignment.order(mf.flags.block_size).compare(.gte))
+ insert_range: {
+ // Ask the filesystem driver to insert extents into the file without copying any data
+ const last_offset, const last_size = parent.last.location(mf).resolve(mf);
+ const last_end = last_offset + last_size;
+ assert(last_end <= old_parent_size);
+ const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
+ const range_size = node.flags.alignment.forward(
+ @intCast(requested_size +| requested_size / growth_factor),
+ ) - old_size;
+ _, const file_size = Node.Index.root.location(mf).resolve(mf);
+ while (true) switch (linux.E.init(switch (std.math.order(range_file_offset, file_size)) {
+ .lt => linux.fallocate(
mf.file.handle,
linux.FALLOC.FL_INSERT_RANGE,
@intCast(range_file_offset),
@intCast(range_size),
- ))) {
- .SUCCESS => {
- var enclosing_ni = ni;
- while (true) {
- try mf.ensureCapacityForSetLocation(gpa);
- const enclosing = enclosing_ni.get(mf);
- const enclosing_offset, const old_enclosing_size =
- enclosing.location().resolve(mf);
- const new_enclosing_size = old_enclosing_size + range_size;
- enclosing_ni.setLocationAssumeCapacity(mf, enclosing_offset, new_enclosing_size);
- if (enclosing_ni == Node.Index.root) {
- assert(enclosing_offset == 0);
- try mf.ensureTotalCapacity(@intCast(new_enclosing_size));
- break;
- }
- var after_ni = enclosing.next;
- while (after_ni != .none) {
- try mf.ensureCapacityForSetLocation(gpa);
- const after = after_ni.get(mf);
- const after_offset, const after_size = after.location().resolve(mf);
- after_ni.setLocationAssumeCapacity(
- mf,
- range_size + after_offset,
- after_size,
- );
- after_ni = after.next;
- }
- enclosing_ni = enclosing.parent;
- }
- return;
- },
- .INTR => continue,
- .BADF, .FBIG, .INVAL => unreachable,
- .IO => return error.InputOutput,
- .NODEV => return error.NotFile,
- .NOSPC => return error.NoSpaceLeft,
- .NOSYS, .OPNOTSUPP => {
- mf.flags.fallocate_insert_range_unsupported = true;
- break :insert_range;
- },
- .PERM => return error.PermissionDenied,
- .SPIPE => return error.Unseekable,
- .TXTBSY => return error.FileBusy,
- else => |e| return std.posix.unexpectedErrno(e),
- };
- }
- switch (node.next) {
- .none => {
- // As this is the last node, we simply need more space in the parent
- const new_parent_size = old_offset + new_size;
- try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
- },
- else => |*next_ni_ptr| switch (node.flags.fixed) {
- false => {
- // Make space at the end of the parent for this floating node
- const last = parent.last.get(mf);
- const last_offset, const last_size = last.location().resolve(mf);
- const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
- const new_parent_size = new_offset + new_size;
- if (new_parent_size > old_parent_size) {
- try mf.resizeNode(
- gpa,
- node.parent,
- new_parent_size +| new_parent_size / 2,
- );
- continue;
- }
- const next_ni = next_ni_ptr.*;
- next_ni.get(mf).prev = node.prev;
- switch (node.prev) {
- .none => parent.first = next_ni,
- else => |prev_ni| prev_ni.get(mf).next = next_ni,
+ ),
+ .eq => linux.ftruncate(mf.file.handle, @intCast(range_file_offset + range_size)),
+ .gt => unreachable,
+ })) {
+ .SUCCESS => {
+ var enclosing_ni = ni;
+ while (true) {
+ try mf.ensureCapacityForSetLocation(gpa);
+ const enclosing = enclosing_ni.get(mf);
+ const enclosing_offset, const old_enclosing_size =
+ enclosing.location().resolve(mf);
+ const new_enclosing_size = old_enclosing_size + range_size;
+ enclosing_ni.setLocationAssumeCapacity(mf, enclosing_offset, new_enclosing_size);
+ if (enclosing_ni == Node.Index.root) {
+ assert(enclosing_offset == 0);
+ try mf.ensureTotalCapacity(@intCast(new_enclosing_size));
+ break;
}
- last.next = ni;
- node.prev = parent.last;
- next_ni_ptr.* = .none;
- parent.last = ni;
- if (node.flags.has_content) {
- const parent_file_offset = node.parent.fileLocation(mf, false).offset;
- try mf.moveRange(
- parent_file_offset + old_offset,
- parent_file_offset + new_offset,
- old_size,
+ var after_ni = enclosing.next;
+ while (after_ni != .none) {
+ try mf.ensureCapacityForSetLocation(gpa);
+ const after = after_ni.get(mf);
+ const after_offset, const after_size = after.location().resolve(mf);
+ after_ni.setLocationAssumeCapacity(
+ mf,
+ range_size + after_offset,
+ after_size,
);
+ after_ni = after.next;
}
- old_offset = new_offset;
- },
- true => {
- // Move the next floating node to make space for this fixed node
- const next_ni = next_ni_ptr.*;
- const next = next_ni.get(mf);
- assert(!next.flags.fixed);
- const next_offset, const next_size = next.location().resolve(mf);
- const last = parent.last.get(mf);
- const last_offset, const last_size = last.location().resolve(mf);
- const new_offset = next.flags.alignment.forward(@intCast(
- @max(old_offset + new_size, last_offset + last_size),
+ enclosing_ni = enclosing.parent;
+ }
+ return;
+ },
+ .INTR => continue,
+ .BADF, .FBIG, .INVAL => unreachable,
+ .IO => return error.InputOutput,
+ .NODEV => return error.NotFile,
+ .NOSPC => return error.NoSpaceLeft,
+ .NOSYS, .OPNOTSUPP => {
+ mf.flags.fallocate_insert_range_unsupported = true;
+ break :insert_range;
+ },
+ .PERM => return error.PermissionDenied,
+ .SPIPE => return error.Unseekable,
+ .TXTBSY => return error.FileBusy,
+ else => |e| return std.posix.unexpectedErrno(e),
+ };
+ }
+ if (node.next == .none) {
+ // As this is the last node, we simply need more space in the parent
+ const new_parent_size = old_offset + new_size;
+ try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / growth_factor);
+ try mf.ensureCapacityForSetLocation(gpa);
+ ni.setLocationAssumeCapacity(mf, old_offset, new_size);
+ return;
+ }
+ if (!node.flags.fixed) {
+ // Make space at the end of the parent for this floating node
+ const last = parent.last.get(mf);
+ const last_offset, const last_size = last.location().resolve(mf);
+ const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
+ const new_parent_size = new_offset + new_size;
+ if (new_parent_size > old_parent_size)
+ try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / growth_factor);
+ try mf.ensureCapacityForSetLocation(gpa);
+ const next_ni = node.next;
+ next_ni.get(mf).prev = node.prev;
+ switch (node.prev) {
+ .none => parent.first = next_ni,
+ else => |prev_ni| prev_ni.get(mf).next = next_ni,
+ }
+ last.next = ni;
+ node.prev = parent.last;
+ node.next = .none;
+ parent.last = ni;
+ if (node.flags.has_content) {
+ const parent_file_offset = node.parent.fileLocation(mf, false).offset;
+ try mf.moveRange(
+ parent_file_offset + old_offset,
+ parent_file_offset + new_offset,
+ old_size,
+ );
+ }
+ ni.setLocationAssumeCapacity(mf, new_offset, new_size);
+ return;
+ }
+ // Search for the first floating node following this fixed node
+ var last_fixed_ni = ni;
+ var first_floating_ni = node.next;
+ var shift = new_size - old_size;
+ var direction: enum { forward, reverse } = .forward;
+ while (true) {
+ assert(last_fixed_ni != .none);
+ const last_fixed = last_fixed_ni.get(mf);
+ assert(last_fixed.flags.fixed);
+ const old_last_fixed_offset, const last_fixed_size = last_fixed.location().resolve(mf);
+ const new_last_fixed_offset = old_last_fixed_offset + shift;
+ make_space: switch (first_floating_ni) {
+ else => {
+ const first_floating = first_floating_ni.get(mf);
+ const old_first_floating_offset, const first_floating_size =
+ first_floating.location().resolve(mf);
+ assert(old_last_fixed_offset + last_fixed_size <= old_first_floating_offset);
+ if (new_last_fixed_offset + last_fixed_size <= old_first_floating_offset)
+ break :make_space;
+ assert(direction == .forward);
+ if (first_floating.flags.fixed) {
+ shift = first_floating.flags.alignment.forward(@intCast(
+ @max(shift, first_floating_size),
));
- const new_parent_size = new_offset + next_size;
- if (new_parent_size > old_parent_size) {
- try mf.resizeNode(
- gpa,
- node.parent,
- new_parent_size +| new_parent_size / 2,
- );
- continue;
- }
- try mf.ensureCapacityForSetLocation(gpa);
- next.prev = parent.last;
- parent.last = next_ni;
- last.next = next_ni;
- next_ni_ptr.* = next.next;
- switch (next.next) {
+ // Not enough space; try the next node
+ last_fixed_ni = first_floating_ni;
+ first_floating_ni = first_floating.next;
+ continue;
+ }
+ // Move the found floating node to make space for preceding fixed nodes
+ const last = parent.last.get(mf);
+ const last_offset, const last_size = last.location().resolve(mf);
+ const new_first_floating_offset = first_floating.flags.alignment.forward(
+ @intCast(@max(new_last_fixed_offset + last_fixed_size, last_offset + last_size)),
+ );
+ const new_parent_size = new_first_floating_offset + first_floating_size;
+ if (new_parent_size > old_parent_size) {
+ try mf.resizeNode(
+ gpa,
+ node.parent,
+ new_parent_size +| new_parent_size / growth_factor,
+ );
+ _, old_parent_size = parent.location().resolve(mf);
+ }
+ try mf.ensureCapacityForSetLocation(gpa);
+ if (parent.last != first_floating_ni) {
+ first_floating.prev = parent.last;
+ parent.last = first_floating_ni;
+ last.next = first_floating_ni;
+ last_fixed.next = first_floating.next;
+ switch (first_floating.next) {
.none => {},
- else => |next_next_ni| next_next_ni.get(mf).prev = ni,
+ else => |next_ni| next_ni.get(mf).prev = last_fixed_ni,
}
- next.next = .none;
- if (node.flags.has_content) {
- const parent_file_offset = node.parent.fileLocation(mf, false).offset;
- try mf.moveRange(
- parent_file_offset + next_offset,
- parent_file_offset + new_offset,
- next_size,
- );
- }
- next_ni.setLocationAssumeCapacity(mf, new_offset, next_size);
- },
+ first_floating.next = .none;
+ }
+ if (first_floating.flags.has_content) {
+ const parent_file_offset =
+ node.parent.fileLocation(mf, false).offset;
+ try mf.moveRange(
+ parent_file_offset + old_first_floating_offset,
+ parent_file_offset + new_first_floating_offset,
+ first_floating_size,
+ );
+ }
+ first_floating_ni.setLocationAssumeCapacity(
+ mf,
+ new_first_floating_offset,
+ first_floating_size,
+ );
+ // Continue the search after the just-moved floating node
+ first_floating_ni = last_fixed.next;
+ continue;
+ },
+ .none => {
+ assert(direction == .forward);
+ const new_parent_size = new_last_fixed_offset + last_fixed_size;
+ if (new_parent_size > old_parent_size) {
+ try mf.resizeNode(
+ gpa,
+ node.parent,
+ new_parent_size +| new_parent_size / growth_factor,
+ );
+ _, old_parent_size = parent.location().resolve(mf);
+ }
},
}
+ try mf.ensureCapacityForSetLocation(gpa);
+ if (last_fixed_ni == ni) {
+ // The original fixed node now has enough space
+ last_fixed_ni.setLocationAssumeCapacity(
+ mf,
+ old_last_fixed_offset,
+ last_fixed_size + shift,
+ );
+ return;
+ }
+ // Move a fixed node into trailing free space
+ if (last_fixed.flags.has_content) {
+ const parent_file_offset = node.parent.fileLocation(mf, false).offset;
+ try mf.moveRange(
+ parent_file_offset + old_last_fixed_offset,
+ parent_file_offset + new_last_fixed_offset,
+ last_fixed_size,
+ );
+ }
+ last_fixed_ni.setLocationAssumeCapacity(mf, new_last_fixed_offset, last_fixed_size);
+ // Retry the previous nodes now that there is enough space
+ first_floating_ni = last_fixed_ni;
+ last_fixed_ni = last_fixed.prev;
+ direction = .reverse;
}
}
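
For context on the `insert_range` path in the hunk above: `FALLOC_FL_INSERT_RANGE` asks the kernel to splice new extents into the middle of a file, shifting all data at and beyond the offset forward without copying. The kernel requires the offset and length to be multiples of the filesystem block size (hence the `alignment.order(mf.flags.block_size).compare(.gte)` guard) and rejects offsets at or past end-of-file, which is why the new code dispatches on `std.math.order(range_file_offset, file_size)` and uses a plain `ftruncate` in the `.eq` case. A minimal sketch of the syscall usage, Linux only, with the `.INTR` retry and most error mapping elided:

const std = @import("std");
const linux = std.os.linux;

fn insertRange(fd: std.posix.fd_t, offset: u64, len: u64) !void {
    switch (linux.E.init(linux.fallocate(
        fd,
        linux.FALLOC.FL_INSERT_RANGE,
        @intCast(offset),
        @intCast(len),
    ))) {
        .SUCCESS => {},
        // Filesystems without insert-range support report NOSYS/OPNOTSUPP;
        // the diff remembers this in fallocate_insert_range_unsupported
        // and falls back to moving data by copying.
        .NOSYS, .OPNOTSUPP => return error.Unsupported,
        else => |e| return std.posix.unexpectedErrno(e),
    }
}
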
@@ -843,7 +942,7 @@ fn ensureCapacityForSetLocation(mf: *MappedFile, gpa: std.mem.Allocator) !void {
pub fn ensureTotalCapacity(mf: *MappedFile, new_capacity: usize) !void {
if (mf.contents.len >= new_capacity) return;
- try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / 2);
+ try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / growth_factor);
}
pub fn ensureTotalCapacityPrecise(mf: *MappedFile, new_capacity: usize) !void {
src/Compilation.zig
@@ -1985,7 +1985,7 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
switch (target_util.zigBackend(target, use_llvm)) {
else => {},
.stage2_aarch64, .stage2_x86_64 => if (target.ofmt == .coff) {
- break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
+ break :s if (is_exe_or_dyn_lib and build_options.have_llvm) .dyn_lib else .zcu;
},
}
if (options.config.use_new_linker) break :s .zcu;
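
Here `break :s value` exits the enclosing labeled block with a result, and the COFF case now yields `.dyn_lib` only when LLVM is compiled in, falling back to `.zcu` otherwise. A sketch of the labeled-block idiom with hypothetical stand-ins for the build options:

const std = @import("std");

test "labeled block yields a value via break" {
    const have_llvm = false; // stand-in for build_options.have_llvm
    const is_exe_or_dyn_lib = true;
    const strategy: enum { dyn_lib, zcu } = s: {
        if (is_exe_or_dyn_lib and have_llvm) break :s .dyn_lib;
        break :s .zcu;
    };
    try std.testing.expect(strategy == .zcu);
}
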
src/target.zig
@@ -389,10 +389,7 @@ pub fn canBuildLibUbsanRt(target: *const std.Target) enum { no, yes, llvm_only,
}
return switch (zigBackend(target, false)) {
.stage2_wasm => .llvm_lld_only,
- .stage2_x86_64 => switch (target.ofmt) {
- .elf, .macho => .yes,
- else => .llvm_only,
- },
+ .stage2_x86_64 => .yes,
else => .llvm_only,
};
}
@@ -776,10 +773,9 @@ pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.Compiler
}
pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
+ _ = target;
return switch (backend) {
.stage2_aarch64 => false,
- .stage2_powerpc => true,
- .stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
else => true,
};
}
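
With the object-format special cases gone, `target` is never read, and Zig rejects unused function parameters, so the hunk discards it with `_ = target;` rather than breaking the signature for existing callers. The pattern in miniature:

const std = @import("std");

const Backend = enum { stage2_aarch64, other };

fn supportsThreads(target: u32, backend: Backend) bool {
    _ = target; // kept for API stability; intentionally unused
    return switch (backend) {
        .stage2_aarch64 => false,
        .other => true,
    };
}

test supportsThreads {
    try std.testing.expect(supportsThreads(0, .other));
    try std.testing.expect(!supportsThreads(0, .stage2_aarch64));
}
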
test/behavior/threadlocal.zig
@@ -7,7 +7,6 @@ test "thread local variable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) {
@@ -27,7 +26,6 @@ test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
const s = "Hello world";
@memcpy(buffer[0..s.len], s);
@@ -41,9 +39,8 @@ test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
- _ = nrfx_uart_rx(&g_uart0);
+ try nrfx_uart_rx(&g_uart0);
}
const nrfx_uart_t = extern struct {
@@ -51,11 +48,12 @@ const nrfx_uart_t = extern struct {
drv_inst_idx: u8,
};
-pub fn nrfx_uart_rx(p_instance: [*c]const nrfx_uart_t) void {
- _ = p_instance;
+pub fn nrfx_uart_rx(p_instance: [*c]const nrfx_uart_t) !void {
+ try expect(p_instance.*.p_reg == 0);
+ try expect(p_instance.*.drv_inst_idx == 0xab);
}
threadlocal var g_uart0 = nrfx_uart_t{
.p_reg = 0,
- .drv_inst_idx = 0,
+ .drv_inst_idx = 0xab,
};
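
These test changes do more than drop the COFF skips: the old test passed the threadlocal's address to a function that ignored it, whereas the new one dereferences it and checks a nonzero initializer (`0xab`), distinguishing a correctly materialized TLS initialization image from zero-filled TLS memory. The core of that check in isolation:

const std = @import("std");

threadlocal var g_byte: u8 = 0xab;

test "threadlocal initializer is applied" {
    // Zero-filled TLS would read 0 here; seeing 0xab shows the
    // initialization image reached this thread's TLS block.
    try std.testing.expectEqual(@as(u8, 0xab), g_byte);
}
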
test/tests.zig
@@ -2291,24 +2291,12 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
if (options.skip_single_threaded and test_target.single_threaded == true)
continue;
- // TODO get compiler-rt tests passing for self-hosted backends.
- if (((target.cpu.arch != .x86_64 and target.cpu.arch != .aarch64) or target.ofmt == .coff) and
- test_target.use_llvm == false and mem.eql(u8, options.name, "compiler-rt"))
- continue;
-
- // TODO get zigc tests passing for other self-hosted backends.
- if (target.cpu.arch != .x86_64 and
- test_target.use_llvm == false and mem.eql(u8, options.name, "zigc"))
- continue;
-
- // TODO get std lib tests passing for other self-hosted backends.
- if ((target.cpu.arch != .x86_64 or target.os.tag != .linux) and
- test_target.use_llvm == false and mem.eql(u8, options.name, "std"))
- continue;
-
- if (target.cpu.arch != .x86_64 and
- test_target.use_llvm == false and mem.eql(u8, options.name, "c-import"))
- continue;
+ if (!would_use_llvm and target.cpu.arch == .aarch64) {
+ // TODO get std tests passing for the aarch64 self-hosted backend.
+ if (mem.eql(u8, options.name, "std")) continue;
+ // TODO get zigc tests passing for the aarch64 self-hosted backend.
+ if (mem.eql(u8, options.name, "zigc")) continue;
+ }
const want_this_mode = for (options.optimize_modes) |m| {
if (m == test_target.optimize_mode) break true;
@@ -2362,7 +2350,7 @@ fn addOneModuleTest(
const single_threaded_suffix = if (test_target.single_threaded == true) "-single" else "";
const backend_suffix = if (test_target.use_llvm == true)
"-llvm"
- else if (target.ofmt == std.Target.ObjectFormat.c)
+ else if (target.ofmt == .c)
"-cbe"
else if (test_target.use_llvm == false)
"-selfhosted"
@@ -2389,7 +2377,7 @@ fn addOneModuleTest(
use_pic,
});
- if (target.ofmt == std.Target.ObjectFormat.c) {
+ if (target.ofmt == .c) {
var altered_query = test_target.target;
altered_query.ofmt = null;