Commit b4c571301b
Changed files (6)
src-self-hosted/codegen.zig
@@ -21,7 +21,7 @@ pub const Result = union(enum) {
};
pub fn generateSymbol(
- bin_file: *link.ElfFile,
+ bin_file: *link.File.Elf,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
@@ -211,7 +211,7 @@ pub fn generateSymbol(
}
const Function = struct {
- bin_file: *link.ElfFile,
+ bin_file: *link.File.Elf,
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
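For codegen call sites that hold the new polymorphic pointer, the concrete backend is recovered through the cast helper added below in link.zig. A minimal sketch, not part of this commit (the error name is invented):

fn elfFromBase(base: *link.File) !*link.File.Elf {
    // cast() returns null when base.tag does not match File.Elf.base_tag.
    return base.cast(link.File.Elf) orelse return error.ExpectedElfBackend;
}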
src-self-hosted/link.zig
@@ -21,6 +21,7 @@ pub const Options = struct {
/// Used for calculating how much space to reserve for executable program code in case
/// the binary file does not already have such a section.
program_code_size_hint: u64 = 256 * 1024,
+ c_standard: ?Module.CStandard = null,
};
/// Attempts incremental linking, if the file already exists.
@@ -32,13 +33,19 @@ pub fn openBinFilePath(
dir: fs.Dir,
sub_path: []const u8,
options: Options,
-) !ElfFile {
+) !*File {
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(options) });
errdefer file.close();
- var bin_file = try openBinFile(allocator, file, options);
- bin_file.owns_file_handle = true;
- return bin_file;
+ if (options.c_standard) |cstd| {
+ return error.Unimplemented;
+ } else {
+ var bin_file = try allocator.create(File.Elf);
+ errdefer allocator.destroy(bin_file);
+ bin_file.* = try openBinFile(allocator, file, options);
+ bin_file.owns_file_handle = true;
+ return &bin_file.base;
+ }
}
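Since openBinFilePath now heap-allocates the backend with allocator.create and hands back &bin_file.base, the caller owns both the object and its allocation. A hypothetical usage sketch, assuming the deinit-then-destroy pairing that allocator.create implies (this commit does not spell it out):

const base = try link.openBinFilePath(allocator, fs.cwd(), "hello", options);
defer {
    base.deinit(); // tag-dispatches to File.Elf.deinit
    if (base.cast(link.File.Elf)) |elf_file| allocator.destroy(elf_file);
}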
/// Atomically overwrites the old file, if present.
@@ -80,7 +87,7 @@ pub fn writeFilePath(
/// Returns an error if `file` is not already open with +read +write +seek abilities.
/// A malicious file is detected as incremental link failure and does not cause Illegal Behavior.
/// This operation is not atomic.
-pub fn openBinFile(allocator: *Allocator, file: fs.File, options: Options) !ElfFile {
+pub fn openBinFile(allocator: *Allocator, file: fs.File, options: Options) !File.Elf {
return openBinFileInner(allocator, file, options) catch |err| switch (err) {
error.IncrFailed => {
return createElfFile(allocator, file, options);
@@ -89,447 +96,496 @@ pub fn openBinFile(allocator: *Allocator, file: fs.File, options: Options) !ElfF
};
}
-pub const ElfFile = struct {
- allocator: *Allocator,
- file: ?fs.File,
- owns_file_handle: bool,
- options: Options,
- ptr_width: enum { p32, p64 },
-
-    /// Stored in native-endian format; depending on target endianness, it needs to be bswapped on read/write.
- /// Same order as in the file.
- sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = std.ArrayListUnmanaged(elf.Elf64_Shdr){},
- shdr_table_offset: ?u64 = null,
-
-    /// Stored in native-endian format; depending on target endianness, it needs to be bswapped on read/write.
- /// Same order as in the file.
- program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = std.ArrayListUnmanaged(elf.Elf64_Phdr){},
- phdr_table_offset: ?u64 = null,
- /// The index into the program headers of a PT_LOAD program header with Read and Execute flags
- phdr_load_re_index: ?u16 = null,
- /// The index into the program headers of the global offset table.
- /// It needs PT_LOAD and Read flags.
- phdr_got_index: ?u16 = null,
- entry_addr: ?u64 = null,
-
- shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
- shstrtab_index: ?u16 = null,
-
- text_section_index: ?u16 = null,
- symtab_section_index: ?u16 = null,
- got_section_index: ?u16 = null,
-
- /// The same order as in the file. ELF requires global symbols to all be after the
-    /// local symbols; they cannot be mixed. So we must buffer all the global symbols and
- /// write them at the end. These are only the local symbols. The length of this array
- /// is the value used for sh_info in the .symtab section.
- local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
- global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
-
- local_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
- global_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
- offset_table_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
-
- /// Same order as in the file. The value is the absolute vaddr value.
- /// If the vaddr of the executable program header changes, the entire
- /// offset table needs to be rewritten.
- offset_table: std.ArrayListUnmanaged(u64) = std.ArrayListUnmanaged(u64){},
-
- phdr_table_dirty: bool = false,
- shdr_table_dirty: bool = false,
- shstrtab_dirty: bool = false,
- offset_table_count_dirty: bool = false,
-
- error_flags: ErrorFlags = ErrorFlags{},
-
- /// A list of text blocks that have surplus capacity. This list can have false
- /// positives, as functions grow and shrink over time, only sometimes being added
- /// or removed from the freelist.
- ///
- /// A text block has surplus capacity when its overcapacity value is greater than
- /// minimum_text_block_size * alloc_num / alloc_den. That is, when it has so
-    /// much extra capacity that we could fit a small new symbol in it, itself with
- /// ideal_capacity or more.
- ///
- /// Ideal capacity is defined by size * alloc_num / alloc_den.
- ///
- /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
- /// overcapacity can be negative. A simple way to have negative overcapacity is to
- /// allocate a fresh text block, which will have ideal capacity, and then grow it
- /// by 1 byte. It will then have -1 overcapacity.
- text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = std.ArrayListUnmanaged(*TextBlock){},
- last_text_block: ?*TextBlock = null,
-
- /// `alloc_num / alloc_den` is the factor of padding when allocating.
- const alloc_num = 4;
- const alloc_den = 3;
-
- /// In order for a slice of bytes to be considered eligible to keep metadata pointing at
- /// it as a possible place to put new symbols, it must have enough room for this many bytes
- /// (plus extra for reserved capacity).
- const minimum_text_block_size = 64;
- const min_text_capacity = minimum_text_block_size * alloc_num / alloc_den;
+pub const File = struct {
+ tag: Tag,
+ pub fn cast(base: *File, comptime T: type) ?*T {
+ if (base.tag != T.base_tag)
+ return null;
- pub const ErrorFlags = struct {
- no_entry_point_found: bool = false,
- };
+ return @fieldParentPtr(T, "base", base);
+ }
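cast works because every backend embeds a `base: File` field at a known offset, so @fieldParentPtr can walk back from the embedded field to the outer struct. The same pattern in isolation, with invented types:

const std = @import("std");

const Base = struct {
    tag: enum { A, B },
};

const A = struct {
    base: Base = Base{ .tag = .A },
    payload: u32 = 42,
};

test "recover the outer struct from an embedded base field" {
    var a = A{};
    const base_ptr: *Base = &a.base;
    // Only valid because base_ptr really points at the base field of an A;
    // File.cast guards this with the tag check shown above.
    const recovered = @fieldParentPtr(A, "base", base_ptr);
    std.testing.expect(recovered.payload == 42);
}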
- pub const TextBlock = struct {
- /// Each decl always gets a local symbol with the fully qualified name.
- /// The vaddr and size are found here directly.
- /// The file offset is found by computing the vaddr offset from the section vaddr
- /// the symbol references, and adding that to the file offset of the section.
- /// If this field is 0, it means the codegen size = 0 and there is no symbol or
- /// offset table entry.
- local_sym_index: u32,
- /// This field is undefined for symbols with size = 0.
- offset_table_index: u32,
- /// Points to the previous and next neighbors, based on the `text_offset`.
- /// This can be used to find, for example, the capacity of this `TextBlock`.
- prev: ?*TextBlock,
- next: ?*TextBlock,
-
- pub const empty = TextBlock{
- .local_sym_index = 0,
- .offset_table_index = undefined,
- .prev = null,
- .next = null,
+ pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
+ try switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).makeWritable(dir, sub_path),
+ else => unreachable,
};
-
- /// Returns how much room there is to grow in virtual address space.
- /// File offset relocation happens transparently, so it is not included in
- /// this calculation.
- fn capacity(self: TextBlock, elf_file: ElfFile) u64 {
- const self_sym = elf_file.local_symbols.items[self.local_sym_index];
- if (self.next) |next| {
- const next_sym = elf_file.local_symbols.items[next.local_sym_index];
- return next_sym.st_value - self_sym.st_value;
- } else {
- // We are the last block. The capacity is limited only by virtual address space.
- return std.math.maxInt(u32) - self_sym.st_value;
- }
- }
-
- fn freeListEligible(self: TextBlock, elf_file: ElfFile) bool {
- // No need to keep a free list node for the last block.
- const next = self.next orelse return false;
- const self_sym = elf_file.local_symbols.items[self.local_sym_index];
- const next_sym = elf_file.local_symbols.items[next.local_sym_index];
- const cap = next_sym.st_value - self_sym.st_value;
- const ideal_cap = self_sym.st_size * alloc_num / alloc_den;
- if (cap <= ideal_cap) return false;
- const surplus = cap - ideal_cap;
- return surplus >= min_text_capacity;
- }
- };
-
- pub const Export = struct {
- sym_index: ?u32 = null,
- };
-
- pub fn deinit(self: *ElfFile) void {
- self.sections.deinit(self.allocator);
- self.program_headers.deinit(self.allocator);
- self.shstrtab.deinit(self.allocator);
- self.local_symbols.deinit(self.allocator);
- self.global_symbols.deinit(self.allocator);
- self.global_symbol_free_list.deinit(self.allocator);
- self.local_symbol_free_list.deinit(self.allocator);
- self.offset_table_free_list.deinit(self.allocator);
- self.text_block_free_list.deinit(self.allocator);
- self.offset_table.deinit(self.allocator);
- if (self.owns_file_handle) {
- if (self.file) |f| f.close();
- }
}
- pub fn makeExecutable(self: *ElfFile) !void {
- assert(self.owns_file_handle);
- if (self.file) |f| {
- f.close();
- self.file = null;
- }
+ pub fn makeExecutable(base: *File) !void {
+ try switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).makeExecutable(),
+ else => unreachable,
+ };
}
- pub fn makeWritable(self: *ElfFile, dir: fs.Dir, sub_path: []const u8) !void {
- assert(self.owns_file_handle);
- if (self.file != null) return;
- self.file = try dir.createFile(sub_path, .{
- .truncate = false,
- .read = true,
- .mode = determineMode(self.options),
- });
+ pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
+ try switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
+ else => unreachable,
+ };
}
- /// Returns end pos of collision, if any.
- fn detectAllocCollision(self: *ElfFile, start: u64, size: u64) ?u64 {
- const small_ptr = self.options.target.cpu.arch.ptrBitWidth() == 32;
- const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
- if (start < ehdr_size)
- return ehdr_size;
-
- const end = start + satMul(size, alloc_num) / alloc_den;
-
- if (self.shdr_table_offset) |off| {
- const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
- const tight_size = self.sections.items.len * shdr_size;
- const increased_size = satMul(tight_size, alloc_num) / alloc_den;
- const test_end = off + increased_size;
- if (end > off and start < test_end) {
- return test_end;
- }
- }
-
- if (self.phdr_table_offset) |off| {
- const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
- const tight_size = self.sections.items.len * phdr_size;
- const increased_size = satMul(tight_size, alloc_num) / alloc_den;
- const test_end = off + increased_size;
- if (end > off and start < test_end) {
- return test_end;
- }
- }
+ pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
+ try switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
+ else => unreachable,
+ };
+ }
- for (self.sections.items) |section| {
- const increased_size = satMul(section.sh_size, alloc_num) / alloc_den;
- const test_end = section.sh_offset + increased_size;
- if (end > section.sh_offset and start < test_end) {
- return test_end;
- }
- }
- for (self.program_headers.items) |program_header| {
- const increased_size = satMul(program_header.p_filesz, alloc_num) / alloc_den;
- const test_end = program_header.p_offset + increased_size;
- if (end > program_header.p_offset and start < test_end) {
- return test_end;
- }
+ pub fn deinit(base: *File) void {
+ switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).deinit(),
+ else => unreachable,
}
- return null;
}
- fn allocatedSize(self: *ElfFile, start: u64) u64 {
- var min_pos: u64 = std.math.maxInt(u64);
- if (self.shdr_table_offset) |off| {
- if (off > start and off < min_pos) min_pos = off;
- }
- if (self.phdr_table_offset) |off| {
- if (off > start and off < min_pos) min_pos = off;
- }
- for (self.sections.items) |section| {
- if (section.sh_offset <= start) continue;
- if (section.sh_offset < min_pos) min_pos = section.sh_offset;
- }
- for (self.program_headers.items) |program_header| {
- if (program_header.p_offset <= start) continue;
- if (program_header.p_offset < min_pos) min_pos = program_header.p_offset;
- }
- return min_pos - start;
+ pub fn flush(base: *File) !void {
+ try switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).flush(),
+ else => unreachable,
+ };
}
- fn findFreeSpace(self: *ElfFile, object_size: u64, min_alignment: u16) u64 {
- var start: u64 = 0;
- while (self.detectAllocCollision(start, object_size)) |item_end| {
- start = mem.alignForwardGeneric(u64, item_end, min_alignment);
+ pub fn freeDecl(base: *File, decl: *Module.Decl) void {
+ switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
+ else => unreachable,
}
- return start;
}
- fn makeString(self: *ElfFile, bytes: []const u8) !u32 {
- try self.shstrtab.ensureCapacity(self.allocator, self.shstrtab.items.len + bytes.len + 1);
- const result = self.shstrtab.items.len;
- self.shstrtab.appendSliceAssumeCapacity(bytes);
- self.shstrtab.appendAssumeCapacity(0);
- return @intCast(u32, result);
+ pub fn errorFlags(base: *File) ErrorFlags {
+ return switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).error_flags,
+ else => unreachable,
+ };
}
- fn getString(self: *ElfFile, str_off: u32) []const u8 {
- assert(str_off < self.shstrtab.items.len);
- return mem.spanZ(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off));
+ pub fn options(base: *File) Options {
+ return switch (base.tag) {
+ .Elf => @fieldParentPtr(Elf, "base", base).options,
+ else => unreachable,
+ };
}
- fn updateString(self: *ElfFile, old_str_off: u32, new_name: []const u8) !u32 {
- const existing_name = self.getString(old_str_off);
- if (mem.eql(u8, existing_name, new_name)) {
- return old_str_off;
- }
- return self.makeString(new_name);
- }
+ pub const Tag = enum {
+ Elf,
+ C,
+ };
- pub fn populateMissingMetadata(self: *ElfFile) !void {
- const small_ptr = switch (self.ptr_width) {
- .p32 => true,
- .p64 => false,
+ pub const ErrorFlags = struct {
+ no_entry_point_found: bool = false,
+ };
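Each wrapper above follows the same shape: switch on base.tag, recover the concrete type with @fieldParentPtr, forward the call, and hit unreachable for the tag whose backend does not exist yet. Once a hypothetical File.C type lands, each wrapper would grow a second arm instead, e.g.:

    pub fn flush(base: *File) !void {
        try switch (base.tag) {
            .Elf => @fieldParentPtr(Elf, "base", base).flush(),
            .C => @fieldParentPtr(C, "base", base).flush(),
        };
    }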
+ pub const Elf = struct {
+ pub const base_tag: Tag = .Elf;
+ base: File = File{ .tag = base_tag },
+
+ allocator: *Allocator,
+ file: ?fs.File,
+ owns_file_handle: bool,
+ options: Options,
+ ptr_width: enum { p32, p64 },
+
+    /// Stored in native-endian format; depending on target endianness, it needs to be bswapped on read/write.
+ /// Same order as in the file.
+ sections: std.ArrayListUnmanaged(elf.Elf64_Shdr) = std.ArrayListUnmanaged(elf.Elf64_Shdr){},
+ shdr_table_offset: ?u64 = null,
+
+    /// Stored in native-endian format; depending on target endianness, it needs to be bswapped on read/write.
+ /// Same order as in the file.
+ program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = std.ArrayListUnmanaged(elf.Elf64_Phdr){},
+ phdr_table_offset: ?u64 = null,
+ /// The index into the program headers of a PT_LOAD program header with Read and Execute flags
+ phdr_load_re_index: ?u16 = null,
+ /// The index into the program headers of the global offset table.
+ /// It needs PT_LOAD and Read flags.
+ phdr_got_index: ?u16 = null,
+ entry_addr: ?u64 = null,
+
+ shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){},
+ shstrtab_index: ?u16 = null,
+
+ text_section_index: ?u16 = null,
+ symtab_section_index: ?u16 = null,
+ got_section_index: ?u16 = null,
+
+ /// The same order as in the file. ELF requires global symbols to all be after the
+    /// local symbols; they cannot be mixed. So we must buffer all the global symbols and
+ /// write them at the end. These are only the local symbols. The length of this array
+ /// is the value used for sh_info in the .symtab section.
+ local_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
+ global_symbols: std.ArrayListUnmanaged(elf.Elf64_Sym) = std.ArrayListUnmanaged(elf.Elf64_Sym){},
+
+ local_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
+ global_symbol_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
+ offset_table_free_list: std.ArrayListUnmanaged(u32) = std.ArrayListUnmanaged(u32){},
+
+ /// Same order as in the file. The value is the absolute vaddr value.
+ /// If the vaddr of the executable program header changes, the entire
+ /// offset table needs to be rewritten.
+ offset_table: std.ArrayListUnmanaged(u64) = std.ArrayListUnmanaged(u64){},
+
+ phdr_table_dirty: bool = false,
+ shdr_table_dirty: bool = false,
+ shstrtab_dirty: bool = false,
+ offset_table_count_dirty: bool = false,
+
+ error_flags: ErrorFlags = ErrorFlags{},
+
+ /// A list of text blocks that have surplus capacity. This list can have false
+ /// positives, as functions grow and shrink over time, only sometimes being added
+ /// or removed from the freelist.
+ ///
+ /// A text block has surplus capacity when its overcapacity value is greater than
+ /// minimum_text_block_size * alloc_num / alloc_den. That is, when it has so
+    /// much extra capacity that we could fit a small new symbol in it, itself with
+ /// ideal_capacity or more.
+ ///
+ /// Ideal capacity is defined by size * alloc_num / alloc_den.
+ ///
+ /// Overcapacity is measured by actual_capacity - ideal_capacity. Note that
+ /// overcapacity can be negative. A simple way to have negative overcapacity is to
+ /// allocate a fresh text block, which will have ideal capacity, and then grow it
+ /// by 1 byte. It will then have -1 overcapacity.
+ text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = std.ArrayListUnmanaged(*TextBlock){},
+ last_text_block: ?*TextBlock = null,
+
+ /// `alloc_num / alloc_den` is the factor of padding when allocating.
+ const alloc_num = 4;
+ const alloc_den = 3;
+
+ /// In order for a slice of bytes to be considered eligible to keep metadata pointing at
+ /// it as a possible place to put new symbols, it must have enough room for this many bytes
+ /// (plus extra for reserved capacity).
+ const minimum_text_block_size = 64;
+ const min_text_capacity = minimum_text_block_size * alloc_num / alloc_den;
+
+ pub const TextBlock = struct {
+ /// Each decl always gets a local symbol with the fully qualified name.
+ /// The vaddr and size are found here directly.
+ /// The file offset is found by computing the vaddr offset from the section vaddr
+ /// the symbol references, and adding that to the file offset of the section.
+ /// If this field is 0, it means the codegen size = 0 and there is no symbol or
+ /// offset table entry.
+ local_sym_index: u32,
+ /// This field is undefined for symbols with size = 0.
+ offset_table_index: u32,
+ /// Points to the previous and next neighbors, based on the `text_offset`.
+ /// This can be used to find, for example, the capacity of this `TextBlock`.
+ prev: ?*TextBlock,
+ next: ?*TextBlock,
+
+ pub const empty = TextBlock{
+ .local_sym_index = 0,
+ .offset_table_index = undefined,
+ .prev = null,
+ .next = null,
+ };
+
+ /// Returns how much room there is to grow in virtual address space.
+ /// File offset relocation happens transparently, so it is not included in
+ /// this calculation.
+ fn capacity(self: TextBlock, elf_file: File.Elf) u64 {
+ const self_sym = elf_file.local_symbols.items[self.local_sym_index];
+ if (self.next) |next| {
+ const next_sym = elf_file.local_symbols.items[next.local_sym_index];
+ return next_sym.st_value - self_sym.st_value;
+ } else {
+ // We are the last block. The capacity is limited only by virtual address space.
+ return std.math.maxInt(u32) - self_sym.st_value;
+ }
+ }
+
+ fn freeListEligible(self: TextBlock, elf_file: File.Elf) bool {
+ // No need to keep a free list node for the last block.
+ const next = self.next orelse return false;
+ const self_sym = elf_file.local_symbols.items[self.local_sym_index];
+ const next_sym = elf_file.local_symbols.items[next.local_sym_index];
+ const cap = next_sym.st_value - self_sym.st_value;
+ const ideal_cap = self_sym.st_size * alloc_num / alloc_den;
+ if (cap <= ideal_cap) return false;
+ const surplus = cap - ideal_cap;
+ return surplus >= min_text_capacity;
+ }
};
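The eligibility rule above reduces to small integer arithmetic: with alloc_num / alloc_den = 4 / 3, min_text_capacity is 64 * 4 / 3 = 85, so a block earns a free-list entry only once its measured capacity exceeds its own ideal capacity by at least 85 bytes. A sketch with invented symbol values:

const std = @import("std");

test "free list eligibility arithmetic" {
    // Mirrors freeListEligible with made-up st_value/st_size numbers.
    const alloc_num = 4;
    const alloc_den = 3;
    const min_text_capacity = 64 * alloc_num / alloc_den; // 85

    const self_st_value: u64 = 0x1000;
    const self_st_size: u64 = 90;
    const next_st_value: u64 = 0x1100; // next block begins 256 bytes later

    const cap = next_st_value - self_st_value; // 256
    const ideal_cap = self_st_size * alloc_num / alloc_den; // 120
    std.testing.expect(cap > ideal_cap);
    const surplus = cap - ideal_cap; // 136
    std.testing.expect(surplus >= min_text_capacity); // eligible
}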
- const ptr_size: u8 = switch (self.ptr_width) {
- .p32 => 4,
- .p64 => 8,
+
+ pub const Export = struct {
+ sym_index: ?u32 = null,
};
- if (self.phdr_load_re_index == null) {
- self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
- const file_size = self.options.program_code_size_hint;
- const p_align = 0x1000;
- const off = self.findFreeSpace(file_size, p_align);
- //std.log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
- try self.program_headers.append(self.allocator, .{
- .p_type = elf.PT_LOAD,
- .p_offset = off,
- .p_filesz = file_size,
- .p_vaddr = default_entry_addr,
- .p_paddr = default_entry_addr,
- .p_memsz = file_size,
- .p_align = p_align,
- .p_flags = elf.PF_X | elf.PF_R,
- });
- self.entry_addr = null;
- self.phdr_table_dirty = true;
+
+ pub fn deinit(self: *File.Elf) void {
+ self.sections.deinit(self.allocator);
+ self.program_headers.deinit(self.allocator);
+ self.shstrtab.deinit(self.allocator);
+ self.local_symbols.deinit(self.allocator);
+ self.global_symbols.deinit(self.allocator);
+ self.global_symbol_free_list.deinit(self.allocator);
+ self.local_symbol_free_list.deinit(self.allocator);
+ self.offset_table_free_list.deinit(self.allocator);
+ self.text_block_free_list.deinit(self.allocator);
+ self.offset_table.deinit(self.allocator);
+ if (self.owns_file_handle) {
+ if (self.file) |f| f.close();
+ }
}
- if (self.phdr_got_index == null) {
- self.phdr_got_index = @intCast(u16, self.program_headers.items.len);
- const file_size = @as(u64, ptr_size) * self.options.symbol_count_hint;
-        // We really only need ptr alignment, but since we are using PROGBITS, Linux requires
- // page align.
- const p_align = 0x1000;
- const off = self.findFreeSpace(file_size, p_align);
- //std.log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
- // TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
- // we'll need to re-use that function anyway, in case the GOT grows and overlaps something
- // else in virtual memory.
- const default_got_addr = 0x4000000;
- try self.program_headers.append(self.allocator, .{
- .p_type = elf.PT_LOAD,
- .p_offset = off,
- .p_filesz = file_size,
- .p_vaddr = default_got_addr,
- .p_paddr = default_got_addr,
- .p_memsz = file_size,
- .p_align = p_align,
- .p_flags = elf.PF_R,
- });
- self.phdr_table_dirty = true;
+
+ pub fn makeExecutable(self: *File.Elf) !void {
+ assert(self.owns_file_handle);
+ if (self.file) |f| {
+ f.close();
+ self.file = null;
+ }
}
- if (self.shstrtab_index == null) {
- self.shstrtab_index = @intCast(u16, self.sections.items.len);
- assert(self.shstrtab.items.len == 0);
- try self.shstrtab.append(self.allocator, 0); // need a 0 at position 0
- const off = self.findFreeSpace(self.shstrtab.items.len, 1);
- //std.log.debug(.link, "found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
- try self.sections.append(self.allocator, .{
- .sh_name = try self.makeString(".shstrtab"),
- .sh_type = elf.SHT_STRTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = self.shstrtab.items.len,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = 1,
- .sh_entsize = 0,
+
+ pub fn makeWritable(self: *File.Elf, dir: fs.Dir, sub_path: []const u8) !void {
+ assert(self.owns_file_handle);
+ if (self.file != null) return;
+ self.file = try dir.createFile(sub_path, .{
+ .truncate = false,
+ .read = true,
+ .mode = determineMode(self.options),
});
- self.shstrtab_dirty = true;
- self.shdr_table_dirty = true;
}
- if (self.text_section_index == null) {
- self.text_section_index = @intCast(u16, self.sections.items.len);
- const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- try self.sections.append(self.allocator, .{
- .sh_name = try self.makeString(".text"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = phdr.p_align,
- .sh_entsize = 0,
- });
- self.shdr_table_dirty = true;
+ /// Returns end pos of collision, if any.
+ fn detectAllocCollision(self: *File.Elf, start: u64, size: u64) ?u64 {
+ const small_ptr = self.options.target.cpu.arch.ptrBitWidth() == 32;
+ const ehdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Ehdr) else @sizeOf(elf.Elf64_Ehdr);
+ if (start < ehdr_size)
+ return ehdr_size;
+
+ const end = start + satMul(size, alloc_num) / alloc_den;
+
+ if (self.shdr_table_offset) |off| {
+ const shdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Shdr) else @sizeOf(elf.Elf64_Shdr);
+ const tight_size = self.sections.items.len * shdr_size;
+ const increased_size = satMul(tight_size, alloc_num) / alloc_den;
+ const test_end = off + increased_size;
+ if (end > off and start < test_end) {
+ return test_end;
+ }
+ }
+
+ if (self.phdr_table_offset) |off| {
+ const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
+ const tight_size = self.sections.items.len * phdr_size;
+ const increased_size = satMul(tight_size, alloc_num) / alloc_den;
+ const test_end = off + increased_size;
+ if (end > off and start < test_end) {
+ return test_end;
+ }
+ }
+
+ for (self.sections.items) |section| {
+ const increased_size = satMul(section.sh_size, alloc_num) / alloc_den;
+ const test_end = section.sh_offset + increased_size;
+ if (end > section.sh_offset and start < test_end) {
+ return test_end;
+ }
+ }
+ for (self.program_headers.items) |program_header| {
+ const increased_size = satMul(program_header.p_filesz, alloc_num) / alloc_den;
+ const test_end = program_header.p_offset + increased_size;
+ if (end > program_header.p_offset and start < test_end) {
+ return test_end;
+ }
+ }
+ return null;
}
- if (self.got_section_index == null) {
- self.got_section_index = @intCast(u16, self.sections.items.len);
- const phdr = &self.program_headers.items[self.phdr_got_index.?];
- try self.sections.append(self.allocator, .{
- .sh_name = try self.makeString(".got"),
- .sh_type = elf.SHT_PROGBITS,
- .sh_flags = elf.SHF_ALLOC,
- .sh_addr = phdr.p_vaddr,
- .sh_offset = phdr.p_offset,
- .sh_size = phdr.p_filesz,
- .sh_link = 0,
- .sh_info = 0,
- .sh_addralign = phdr.p_align,
- .sh_entsize = 0,
- });
- self.shdr_table_dirty = true;
+ fn allocatedSize(self: *File.Elf, start: u64) u64 {
+ var min_pos: u64 = std.math.maxInt(u64);
+ if (self.shdr_table_offset) |off| {
+ if (off > start and off < min_pos) min_pos = off;
+ }
+ if (self.phdr_table_offset) |off| {
+ if (off > start and off < min_pos) min_pos = off;
+ }
+ for (self.sections.items) |section| {
+ if (section.sh_offset <= start) continue;
+ if (section.sh_offset < min_pos) min_pos = section.sh_offset;
+ }
+ for (self.program_headers.items) |program_header| {
+ if (program_header.p_offset <= start) continue;
+ if (program_header.p_offset < min_pos) min_pos = program_header.p_offset;
+ }
+ return min_pos - start;
}
- if (self.symtab_section_index == null) {
- self.symtab_section_index = @intCast(u16, self.sections.items.len);
- const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
- const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
- const file_size = self.options.symbol_count_hint * each_size;
- const off = self.findFreeSpace(file_size, min_align);
- //std.log.debug(.link, "found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
-
- try self.sections.append(self.allocator, .{
- .sh_name = try self.makeString(".symtab"),
- .sh_type = elf.SHT_SYMTAB,
- .sh_flags = 0,
- .sh_addr = 0,
- .sh_offset = off,
- .sh_size = file_size,
- // The section header index of the associated string table.
- .sh_link = self.shstrtab_index.?,
- .sh_info = @intCast(u32, self.local_symbols.items.len),
- .sh_addralign = min_align,
- .sh_entsize = each_size,
- });
- self.shdr_table_dirty = true;
- try self.writeSymbol(0);
+
+ fn findFreeSpace(self: *File.Elf, object_size: u64, min_alignment: u16) u64 {
+ var start: u64 = 0;
+ while (self.detectAllocCollision(start, object_size)) |item_end| {
+ start = mem.alignForwardGeneric(u64, item_end, min_alignment);
+ }
+ return start;
}
- const shsize: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Shdr),
- .p64 => @sizeOf(elf.Elf64_Shdr),
- };
- const shalign: u16 = switch (self.ptr_width) {
- .p32 => @alignOf(elf.Elf32_Shdr),
- .p64 => @alignOf(elf.Elf64_Shdr),
- };
- if (self.shdr_table_offset == null) {
- self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
- self.shdr_table_dirty = true;
+
+ fn makeString(self: *File.Elf, bytes: []const u8) !u32 {
+ try self.shstrtab.ensureCapacity(self.allocator, self.shstrtab.items.len + bytes.len + 1);
+ const result = self.shstrtab.items.len;
+ self.shstrtab.appendSliceAssumeCapacity(bytes);
+ self.shstrtab.appendAssumeCapacity(0);
+ return @intCast(u32, result);
}
- const phsize: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Phdr),
- .p64 => @sizeOf(elf.Elf64_Phdr),
- };
- const phalign: u16 = switch (self.ptr_width) {
- .p32 => @alignOf(elf.Elf32_Phdr),
- .p64 => @alignOf(elf.Elf64_Phdr),
- };
- if (self.phdr_table_offset == null) {
- self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
- self.phdr_table_dirty = true;
+
+ fn getString(self: *File.Elf, str_off: u32) []const u8 {
+ assert(str_off < self.shstrtab.items.len);
+ return mem.spanZ(@ptrCast([*:0]const u8, self.shstrtab.items.ptr + str_off));
}
- {
- // Iterate over symbols, populating free_list and last_text_block.
- if (self.local_symbols.items.len != 1) {
- @panic("TODO implement setting up free_list and last_text_block from existing ELF file");
+
+ fn updateString(self: *File.Elf, old_str_off: u32, new_name: []const u8) !u32 {
+ const existing_name = self.getString(old_str_off);
+ if (mem.eql(u8, existing_name, new_name)) {
+ return old_str_off;
}
-        // We are starting with an empty file. The default values are correct: null and empty list.
+ return self.makeString(new_name);
}
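makeString, getString, and updateString implement the standard ELF string table: a flat byte buffer of NUL-terminated names where a "string" is just a u32 byte offset, and renaming appends a fresh copy rather than reclaiming the old bytes. The round trip in isolation:

const std = @import("std");

test "string table offsets round-trip" {
    var shstrtab = std.ArrayList(u8).init(std.testing.allocator);
    defer shstrtab.deinit();
    try shstrtab.append(0); // offset 0 is the empty name, as in populateMissingMetadata

    // makeString: remember the offset, then append the bytes plus a NUL.
    const off = @intCast(u32, shstrtab.items.len);
    try shstrtab.appendSlice(".text");
    try shstrtab.append(0);

    // getString: scan to the NUL starting at that offset.
    const name = std.mem.spanZ(@ptrCast([*:0]const u8, shstrtab.items.ptr + off));
    std.testing.expect(std.mem.eql(u8, name, ".text"));
}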
- }
-
- /// Commit pending changes and write headers.
- pub fn flush(self: *ElfFile) !void {
- const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
- // Unfortunately these have to be buffered and done at the end because ELF does not allow
- // mixing local and global symbols within a symbol table.
- try self.writeAllGlobalSymbols();
-
- if (self.phdr_table_dirty) {
+ pub fn populateMissingMetadata(self: *File.Elf) !void {
+ const small_ptr = switch (self.ptr_width) {
+ .p32 => true,
+ .p64 => false,
+ };
+ const ptr_size: u8 = switch (self.ptr_width) {
+ .p32 => 4,
+ .p64 => 8,
+ };
+ if (self.phdr_load_re_index == null) {
+ self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
+ const file_size = self.options.program_code_size_hint;
+ const p_align = 0x1000;
+ const off = self.findFreeSpace(file_size, p_align);
+ //std.log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ try self.program_headers.append(self.allocator, .{
+ .p_type = elf.PT_LOAD,
+ .p_offset = off,
+ .p_filesz = file_size,
+ .p_vaddr = default_entry_addr,
+ .p_paddr = default_entry_addr,
+ .p_memsz = file_size,
+ .p_align = p_align,
+ .p_flags = elf.PF_X | elf.PF_R,
+ });
+ self.entry_addr = null;
+ self.phdr_table_dirty = true;
+ }
+ if (self.phdr_got_index == null) {
+ self.phdr_got_index = @intCast(u16, self.program_headers.items.len);
+ const file_size = @as(u64, ptr_size) * self.options.symbol_count_hint;
+            // We really only need ptr alignment, but since we are using PROGBITS, Linux requires
+ // page align.
+ const p_align = 0x1000;
+ const off = self.findFreeSpace(file_size, p_align);
+ //std.log.debug(.link, "found PT_LOAD free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+ // TODO instead of hard coding the vaddr, make a function to find a vaddr to put things at.
+ // we'll need to re-use that function anyway, in case the GOT grows and overlaps something
+ // else in virtual memory.
+ const default_got_addr = 0x4000000;
+ try self.program_headers.append(self.allocator, .{
+ .p_type = elf.PT_LOAD,
+ .p_offset = off,
+ .p_filesz = file_size,
+ .p_vaddr = default_got_addr,
+ .p_paddr = default_got_addr,
+ .p_memsz = file_size,
+ .p_align = p_align,
+ .p_flags = elf.PF_R,
+ });
+ self.phdr_table_dirty = true;
+ }
+ if (self.shstrtab_index == null) {
+ self.shstrtab_index = @intCast(u16, self.sections.items.len);
+ assert(self.shstrtab.items.len == 0);
+ try self.shstrtab.append(self.allocator, 0); // need a 0 at position 0
+ const off = self.findFreeSpace(self.shstrtab.items.len, 1);
+ //std.log.debug(.link, "found shstrtab free space 0x{x} to 0x{x}\n", .{ off, off + self.shstrtab.items.len });
+ try self.sections.append(self.allocator, .{
+ .sh_name = try self.makeString(".shstrtab"),
+ .sh_type = elf.SHT_STRTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = self.shstrtab.items.len,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = 1,
+ .sh_entsize = 0,
+ });
+ self.shstrtab_dirty = true;
+ self.shdr_table_dirty = true;
+ }
+ if (self.text_section_index == null) {
+ self.text_section_index = @intCast(u16, self.sections.items.len);
+ const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+
+ try self.sections.append(self.allocator, .{
+ .sh_name = try self.makeString(".text"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC | elf.SHF_EXECINSTR,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = phdr.p_align,
+ .sh_entsize = 0,
+ });
+ self.shdr_table_dirty = true;
+ }
+ if (self.got_section_index == null) {
+ self.got_section_index = @intCast(u16, self.sections.items.len);
+ const phdr = &self.program_headers.items[self.phdr_got_index.?];
+
+ try self.sections.append(self.allocator, .{
+ .sh_name = try self.makeString(".got"),
+ .sh_type = elf.SHT_PROGBITS,
+ .sh_flags = elf.SHF_ALLOC,
+ .sh_addr = phdr.p_vaddr,
+ .sh_offset = phdr.p_offset,
+ .sh_size = phdr.p_filesz,
+ .sh_link = 0,
+ .sh_info = 0,
+ .sh_addralign = phdr.p_align,
+ .sh_entsize = 0,
+ });
+ self.shdr_table_dirty = true;
+ }
+ if (self.symtab_section_index == null) {
+ self.symtab_section_index = @intCast(u16, self.sections.items.len);
+ const min_align: u16 = if (small_ptr) @alignOf(elf.Elf32_Sym) else @alignOf(elf.Elf64_Sym);
+ const each_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Sym) else @sizeOf(elf.Elf64_Sym);
+ const file_size = self.options.symbol_count_hint * each_size;
+ const off = self.findFreeSpace(file_size, min_align);
+ //std.log.debug(.link, "found symtab free space 0x{x} to 0x{x}\n", .{ off, off + file_size });
+
+ try self.sections.append(self.allocator, .{
+ .sh_name = try self.makeString(".symtab"),
+ .sh_type = elf.SHT_SYMTAB,
+ .sh_flags = 0,
+ .sh_addr = 0,
+ .sh_offset = off,
+ .sh_size = file_size,
+ // The section header index of the associated string table.
+ .sh_link = self.shstrtab_index.?,
+ .sh_info = @intCast(u32, self.local_symbols.items.len),
+ .sh_addralign = min_align,
+ .sh_entsize = each_size,
+ });
+ self.shdr_table_dirty = true;
+ try self.writeSymbol(0);
+ }
+ const shsize: u64 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Shdr),
+ .p64 => @sizeOf(elf.Elf64_Shdr),
+ };
+ const shalign: u16 = switch (self.ptr_width) {
+ .p32 => @alignOf(elf.Elf32_Shdr),
+ .p64 => @alignOf(elf.Elf64_Shdr),
+ };
+ if (self.shdr_table_offset == null) {
+ self.shdr_table_offset = self.findFreeSpace(self.sections.items.len * shsize, shalign);
+ self.shdr_table_dirty = true;
+ }
const phsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Phdr),
.p64 => @sizeOf(elf.Elf64_Phdr),
@@ -538,823 +594,854 @@ pub const ElfFile = struct {
.p32 => @alignOf(elf.Elf32_Phdr),
.p64 => @alignOf(elf.Elf64_Phdr),
};
- const allocated_size = self.allocatedSize(self.phdr_table_offset.?);
- const needed_size = self.program_headers.items.len * phsize;
-
- if (needed_size > allocated_size) {
- self.phdr_table_offset = null; // free the space
- self.phdr_table_offset = self.findFreeSpace(needed_size, phalign);
+ if (self.phdr_table_offset == null) {
+ self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
+ self.phdr_table_dirty = true;
}
-
- switch (self.ptr_width) {
- .p32 => {
- const buf = try self.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
- defer self.allocator.free(buf);
-
- for (buf) |*phdr, i| {
- phdr.* = progHeaderTo32(self.program_headers.items[i]);
- if (foreign_endian) {
- bswapAllFields(elf.Elf32_Phdr, phdr);
- }
- }
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
- },
- .p64 => {
- const buf = try self.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
- defer self.allocator.free(buf);
-
- for (buf) |*phdr, i| {
- phdr.* = self.program_headers.items[i];
- if (foreign_endian) {
- bswapAllFields(elf.Elf64_Phdr, phdr);
- }
- }
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
- },
+ {
+ // Iterate over symbols, populating free_list and last_text_block.
+ if (self.local_symbols.items.len != 1) {
+ @panic("TODO implement setting up free_list and last_text_block from existing ELF file");
+ }
+            // We are starting with an empty file. The default values are correct: null and empty list.
}
- self.phdr_table_dirty = false;
}
- {
- const shstrtab_sect = &self.sections.items[self.shstrtab_index.?];
- if (self.shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
- const allocated_size = self.allocatedSize(shstrtab_sect.sh_offset);
- const needed_size = self.shstrtab.items.len;
+ /// Commit pending changes and write headers.
+ pub fn flush(self: *File.Elf) !void {
+ const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
+
+ // Unfortunately these have to be buffered and done at the end because ELF does not allow
+ // mixing local and global symbols within a symbol table.
+ try self.writeAllGlobalSymbols();
+
+ if (self.phdr_table_dirty) {
+ const phsize: u64 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Phdr),
+ .p64 => @sizeOf(elf.Elf64_Phdr),
+ };
+ const phalign: u16 = switch (self.ptr_width) {
+ .p32 => @alignOf(elf.Elf32_Phdr),
+ .p64 => @alignOf(elf.Elf64_Phdr),
+ };
+ const allocated_size = self.allocatedSize(self.phdr_table_offset.?);
+ const needed_size = self.program_headers.items.len * phsize;
if (needed_size > allocated_size) {
- shstrtab_sect.sh_size = 0; // free the space
- shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
+ self.phdr_table_offset = null; // free the space
+ self.phdr_table_offset = self.findFreeSpace(needed_size, phalign);
}
- shstrtab_sect.sh_size = needed_size;
- //std.log.debug(.link, "shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
- try self.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
- if (!self.shdr_table_dirty) {
- // Then it won't get written with the others and we need to do it.
- try self.writeSectHeader(self.shstrtab_index.?);
+ switch (self.ptr_width) {
+ .p32 => {
+ const buf = try self.allocator.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
+ defer self.allocator.free(buf);
+
+ for (buf) |*phdr, i| {
+ phdr.* = progHeaderTo32(self.program_headers.items[i]);
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf32_Phdr, phdr);
+ }
+ }
+ try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
+ },
+ .p64 => {
+ const buf = try self.allocator.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
+ defer self.allocator.free(buf);
+
+ for (buf) |*phdr, i| {
+ phdr.* = self.program_headers.items[i];
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf64_Phdr, phdr);
+ }
+ }
+ try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
+ },
}
- self.shstrtab_dirty = false;
+ self.phdr_table_dirty = false;
}
- }
- if (self.shdr_table_dirty) {
- const shsize: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Shdr),
- .p64 => @sizeOf(elf.Elf64_Shdr),
- };
- const shalign: u16 = switch (self.ptr_width) {
- .p32 => @alignOf(elf.Elf32_Shdr),
- .p64 => @alignOf(elf.Elf64_Shdr),
- };
- const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
- const needed_size = self.sections.items.len * shsize;
- if (needed_size > allocated_size) {
- self.shdr_table_offset = null; // free the space
- self.shdr_table_offset = self.findFreeSpace(needed_size, shalign);
- }
+ {
+ const shstrtab_sect = &self.sections.items[self.shstrtab_index.?];
+ if (self.shstrtab_dirty or self.shstrtab.items.len != shstrtab_sect.sh_size) {
+ const allocated_size = self.allocatedSize(shstrtab_sect.sh_offset);
+ const needed_size = self.shstrtab.items.len;
- switch (self.ptr_width) {
- .p32 => {
- const buf = try self.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
- defer self.allocator.free(buf);
+ if (needed_size > allocated_size) {
+ shstrtab_sect.sh_size = 0; // free the space
+ shstrtab_sect.sh_offset = self.findFreeSpace(needed_size, 1);
+ }
+ shstrtab_sect.sh_size = needed_size;
+ //std.log.debug(.link, "shstrtab start=0x{x} end=0x{x}\n", .{ shstrtab_sect.sh_offset, shstrtab_sect.sh_offset + needed_size });
- for (buf) |*shdr, i| {
- shdr.* = sectHeaderTo32(self.sections.items[i]);
- if (foreign_endian) {
- bswapAllFields(elf.Elf32_Shdr, shdr);
- }
+ try self.file.?.pwriteAll(self.shstrtab.items, shstrtab_sect.sh_offset);
+ if (!self.shdr_table_dirty) {
+ // Then it won't get written with the others and we need to do it.
+ try self.writeSectHeader(self.shstrtab_index.?);
}
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
- },
- .p64 => {
- const buf = try self.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
- defer self.allocator.free(buf);
+ self.shstrtab_dirty = false;
+ }
+ }
+ if (self.shdr_table_dirty) {
+ const shsize: u64 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Shdr),
+ .p64 => @sizeOf(elf.Elf64_Shdr),
+ };
+ const shalign: u16 = switch (self.ptr_width) {
+ .p32 => @alignOf(elf.Elf32_Shdr),
+ .p64 => @alignOf(elf.Elf64_Shdr),
+ };
+ const allocated_size = self.allocatedSize(self.shdr_table_offset.?);
+ const needed_size = self.sections.items.len * shsize;
- for (buf) |*shdr, i| {
- shdr.* = self.sections.items[i];
- //std.log.debug(.link, "writing section {}\n", .{shdr.*});
- if (foreign_endian) {
- bswapAllFields(elf.Elf64_Shdr, shdr);
+ if (needed_size > allocated_size) {
+ self.shdr_table_offset = null; // free the space
+ self.shdr_table_offset = self.findFreeSpace(needed_size, shalign);
+ }
+
+ switch (self.ptr_width) {
+ .p32 => {
+ const buf = try self.allocator.alloc(elf.Elf32_Shdr, self.sections.items.len);
+ defer self.allocator.free(buf);
+
+ for (buf) |*shdr, i| {
+ shdr.* = sectHeaderTo32(self.sections.items[i]);
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf32_Shdr, shdr);
+ }
}
- }
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
- },
+ try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ },
+ .p64 => {
+ const buf = try self.allocator.alloc(elf.Elf64_Shdr, self.sections.items.len);
+ defer self.allocator.free(buf);
+
+ for (buf) |*shdr, i| {
+ shdr.* = self.sections.items[i];
+ //std.log.debug(.link, "writing section {}\n", .{shdr.*});
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf64_Shdr, shdr);
+ }
+ }
+ try self.file.?.pwriteAll(mem.sliceAsBytes(buf), self.shdr_table_offset.?);
+ },
+ }
+ self.shdr_table_dirty = false;
}
- self.shdr_table_dirty = false;
- }
- if (self.entry_addr == null and self.options.output_mode == .Exe) {
- self.error_flags.no_entry_point_found = true;
- } else {
- self.error_flags.no_entry_point_found = false;
- try self.writeElfHeader();
+ if (self.entry_addr == null and self.options.output_mode == .Exe) {
+ self.error_flags.no_entry_point_found = true;
+ } else {
+ self.error_flags.no_entry_point_found = false;
+ try self.writeElfHeader();
+ }
+
+ // The point of flush() is to commit changes, so nothing should be dirty after this.
+ assert(!self.phdr_table_dirty);
+ assert(!self.shdr_table_dirty);
+ assert(!self.shstrtab_dirty);
+ assert(!self.offset_table_count_dirty);
+ const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ assert(syms_sect.sh_info == self.local_symbols.items.len);
}
- // The point of flush() is to commit changes, so nothing should be dirty after this.
- assert(!self.phdr_table_dirty);
- assert(!self.shdr_table_dirty);
- assert(!self.shstrtab_dirty);
- assert(!self.offset_table_count_dirty);
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
- assert(syms_sect.sh_info == self.local_symbols.items.len);
- }
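When the target's endianness differs from the host's, flush byte-swaps every header in the scratch buffer before writing. bswapAllFields itself is defined outside this hunk; a plausible shape for it, assuming all fields are fixed-width integers (true of the Elf*_Phdr and Elf*_Shdr structs), would be:

fn bswapAllFields(comptime S: type, ptr: *S) void {
    inline for (std.meta.fields(S)) |f| {
        @field(ptr, f.name) = @byteSwap(f.field_type, @field(ptr, f.name));
    }
}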
+ fn writeElfHeader(self: *File.Elf) !void {
+ var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;
- fn writeElfHeader(self: *ElfFile) !void {
- var hdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined;
+ var index: usize = 0;
+ hdr_buf[0..4].* = "\x7fELF".*;
+ index += 4;
- var index: usize = 0;
- hdr_buf[0..4].* = "\x7fELF".*;
- index += 4;
+ hdr_buf[index] = switch (self.ptr_width) {
+ .p32 => elf.ELFCLASS32,
+ .p64 => elf.ELFCLASS64,
+ };
+ index += 1;
- hdr_buf[index] = switch (self.ptr_width) {
- .p32 => elf.ELFCLASS32,
- .p64 => elf.ELFCLASS64,
- };
- index += 1;
+ const endian = self.options.target.cpu.arch.endian();
+ hdr_buf[index] = switch (endian) {
+ .Little => elf.ELFDATA2LSB,
+ .Big => elf.ELFDATA2MSB,
+ };
+ index += 1;
- const endian = self.options.target.cpu.arch.endian();
- hdr_buf[index] = switch (endian) {
- .Little => elf.ELFDATA2LSB,
- .Big => elf.ELFDATA2MSB,
- };
- index += 1;
-
- hdr_buf[index] = 1; // ELF version
- index += 1;
-
- // OS ABI, often set to 0 regardless of target platform
- // ABI Version, possibly used by glibc but not by static executables
- // padding
- mem.set(u8, hdr_buf[index..][0..9], 0);
- index += 9;
-
- assert(index == 16);
-
- const elf_type = switch (self.options.output_mode) {
- .Exe => elf.ET.EXEC,
- .Obj => elf.ET.REL,
- .Lib => switch (self.options.link_mode) {
- .Static => elf.ET.REL,
- .Dynamic => elf.ET.DYN,
- },
- };
- mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(elf_type), endian);
- index += 2;
-
- const machine = self.options.target.cpu.arch.toElfMachine();
- mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(machine), endian);
- index += 2;
-
- // ELF Version, again
- mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
- index += 4;
-
- const e_entry = if (elf_type == .REL) 0 else self.entry_addr.?;
-
- switch (self.ptr_width) {
- .p32 => {
- mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
- index += 4;
-
- // e_phoff
- mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.phdr_table_offset.?), endian);
- index += 4;
-
- // e_shoff
- mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian);
- index += 4;
- },
- .p64 => {
- // e_entry
- mem.writeInt(u64, hdr_buf[index..][0..8], e_entry, endian);
- index += 8;
-
- // e_phoff
- mem.writeInt(u64, hdr_buf[index..][0..8], self.phdr_table_offset.?, endian);
- index += 8;
-
- // e_shoff
- mem.writeInt(u64, hdr_buf[index..][0..8], self.shdr_table_offset.?, endian);
- index += 8;
- },
- }
+ hdr_buf[index] = 1; // ELF version
+ index += 1;
- const e_flags = 0;
- mem.writeInt(u32, hdr_buf[index..][0..4], e_flags, endian);
- index += 4;
+ // OS ABI, often set to 0 regardless of target platform
+ // ABI Version, possibly used by glibc but not by static executables
+ // padding
+ mem.set(u8, hdr_buf[index..][0..9], 0);
+ index += 9;
- const e_ehsize: u16 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Ehdr),
- .p64 => @sizeOf(elf.Elf64_Ehdr),
- };
- mem.writeInt(u16, hdr_buf[index..][0..2], e_ehsize, endian);
- index += 2;
+ assert(index == 16);
- const e_phentsize: u16 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Phdr),
- .p64 => @sizeOf(elf.Elf64_Phdr),
- };
- mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian);
- index += 2;
+ const elf_type = switch (self.options.output_mode) {
+ .Exe => elf.ET.EXEC,
+ .Obj => elf.ET.REL,
+ .Lib => switch (self.options.link_mode) {
+ .Static => elf.ET.REL,
+ .Dynamic => elf.ET.DYN,
+ },
+ };
+ mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(elf_type), endian);
+ index += 2;
- const e_phnum = @intCast(u16, self.program_headers.items.len);
- mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
- index += 2;
+ const machine = self.options.target.cpu.arch.toElfMachine();
+ mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(machine), endian);
+ index += 2;
- const e_shentsize: u16 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Shdr),
- .p64 => @sizeOf(elf.Elf64_Shdr),
- };
- mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
- index += 2;
+ // ELF Version, again
+ mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
+ index += 4;
- const e_shnum = @intCast(u16, self.sections.items.len);
- mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
- index += 2;
+ const e_entry = if (elf_type == .REL) 0 else self.entry_addr.?;
- mem.writeInt(u16, hdr_buf[index..][0..2], self.shstrtab_index.?, endian);
- index += 2;
+ switch (self.ptr_width) {
+ .p32 => {
+ mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
+ index += 4;
- assert(index == e_ehsize);
+ // e_phoff
+ mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.phdr_table_offset.?), endian);
+ index += 4;
- try self.file.?.pwriteAll(hdr_buf[0..index], 0);
- }
+ // e_shoff
+ mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.shdr_table_offset.?), endian);
+ index += 4;
+ },
+ .p64 => {
+ // e_entry
+ mem.writeInt(u64, hdr_buf[index..][0..8], e_entry, endian);
+ index += 8;
- fn freeTextBlock(self: *ElfFile, text_block: *TextBlock) void {
- var already_have_free_list_node = false;
- {
- var i: usize = 0;
- while (i < self.text_block_free_list.items.len) {
- if (self.text_block_free_list.items[i] == text_block) {
- _ = self.text_block_free_list.swapRemove(i);
- continue;
- }
- if (self.text_block_free_list.items[i] == text_block.prev) {
- already_have_free_list_node = true;
- }
- i += 1;
+ // e_phoff
+ mem.writeInt(u64, hdr_buf[index..][0..8], self.phdr_table_offset.?, endian);
+ index += 8;
+
+ // e_shoff
+ mem.writeInt(u64, hdr_buf[index..][0..8], self.shdr_table_offset.?, endian);
+ index += 8;
+ },
}
- }
- if (self.last_text_block == text_block) {
- // TODO shrink the .text section size here
- self.last_text_block = text_block.prev;
- }
+ const e_flags = 0;
+ mem.writeInt(u32, hdr_buf[index..][0..4], e_flags, endian);
+ index += 4;
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
+ const e_ehsize: u16 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Ehdr),
+ .p64 => @sizeOf(elf.Elf64_Ehdr),
+ };
+ mem.writeInt(u16, hdr_buf[index..][0..2], e_ehsize, endian);
+ index += 2;
- if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
-            // The free list is a heuristic; it doesn't have to be perfect, so we can
- // ignore the OOM here.
- self.text_block_free_list.append(self.allocator, prev) catch {};
- }
- } else {
- text_block.prev = null;
- }
+ const e_phentsize: u16 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Phdr),
+ .p64 => @sizeOf(elf.Elf64_Phdr),
+ };
+ mem.writeInt(u16, hdr_buf[index..][0..2], e_phentsize, endian);
+ index += 2;
- if (text_block.next) |next| {
- next.prev = text_block.prev;
- } else {
- text_block.next = null;
- }
- }
+ const e_phnum = @intCast(u16, self.program_headers.items.len);
+ mem.writeInt(u16, hdr_buf[index..][0..2], e_phnum, endian);
+ index += 2;
- fn shrinkTextBlock(self: *ElfFile, text_block: *TextBlock, new_block_size: u64) void {
- // TODO check the new capacity, and if it crosses the size threshold into a big enough
- // capacity, insert a free list node for it.
- }
+ const e_shentsize: u16 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Shdr),
+ .p64 => @sizeOf(elf.Elf64_Shdr),
+ };
+ mem.writeInt(u16, hdr_buf[index..][0..2], e_shentsize, endian);
+ index += 2;
- fn growTextBlock(self: *ElfFile, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
- const sym = self.local_symbols.items[text_block.local_sym_index];
- const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
- const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
- if (!need_realloc) return sym.st_value;
- return self.allocateTextBlock(text_block, new_block_size, alignment);
- }
+ const e_shnum = @intCast(u16, self.sections.items.len);
+ mem.writeInt(u16, hdr_buf[index..][0..2], e_shnum, endian);
+ index += 2;
+
+ mem.writeInt(u16, hdr_buf[index..][0..2], self.shstrtab_index.?, endian);
+ index += 2;
+
+ assert(index == e_ehsize);
- fn allocateTextBlock(self: *ElfFile, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
- const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
- const shdr = &self.sections.items[self.text_section_index.?];
- const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den;
-
- // We use these to indicate our intention to update metadata, placing the new block,
- // and possibly removing a free list node.
-    // It would be simpler to do it inside the loop below, but that would cause a
- // problem if an error was returned later in the function. So this action
- // is actually carried out at the end of the function, when errors are no longer possible.
- var block_placement: ?*TextBlock = null;
- var free_list_removal: ?usize = null;
-
- // First we look for an appropriately sized free list node.
- // The list is unordered. We'll just take the first thing that works.
- const vaddr = blk: {
- var i: usize = 0;
- while (i < self.text_block_free_list.items.len) {
- const big_block = self.text_block_free_list.items[i];
- // We now have a pointer to a live text block that has too much capacity.
- // Is it enough that we could fit this new text block?
- const sym = self.local_symbols.items[big_block.local_sym_index];
- const capacity = big_block.capacity(self.*);
- const ideal_capacity = capacity * alloc_num / alloc_den;
- const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
- const capacity_end_vaddr = sym.st_value + capacity;
- const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
- const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
- if (new_start_vaddr < ideal_capacity_end_vaddr) {
- // Additional bookkeeping here to notice if this free list node
- // should be deleted because the block that it points to has grown to take up
- // more of the extra capacity.
- if (!big_block.freeListEligible(self.*)) {
+ try self.file.?.pwriteAll(hdr_buf[0..index], 0);
+ }
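The hand-rolled header relies on the fixed ELF layout: 16 ident bytes (magic, class, data encoding, version, then nine zero bytes), followed by the sized fields, which is what the two assert(index == ...) checks pin down. A quick sanity test against the std.elf structs:

const std = @import("std");

test "ELF header sizes match the hand-written layout" {
    std.testing.expect(@sizeOf(std.elf.Elf64_Ehdr) == 64);
    std.testing.expect(@sizeOf(std.elf.Elf32_Ehdr) == 52);
}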
+
+ fn freeTextBlock(self: *File.Elf, text_block: *TextBlock) void {
+ var already_have_free_list_node = false;
+ {
+ var i: usize = 0;
+ while (i < self.text_block_free_list.items.len) {
+ if (self.text_block_free_list.items[i] == text_block) {
_ = self.text_block_free_list.swapRemove(i);
- } else {
- i += 1;
+ continue;
}
- continue;
- }
- // At this point we know that we will place the new block here. But the
-            // remaining question is whether there is still enough capacity left
-            // over for there to be a free list node.
- const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
- const keep_free_list_node = remaining_capacity >= min_text_capacity;
-
- // Set up the metadata to be updated, after errors are no longer possible.
- block_placement = big_block;
- if (!keep_free_list_node) {
- free_list_removal = i;
+ if (self.text_block_free_list.items[i] == text_block.prev) {
+ already_have_free_list_node = true;
+ }
+ i += 1;
}
- break :blk new_start_vaddr;
- } else if (self.last_text_block) |last| {
- const sym = self.local_symbols.items[last.local_sym_index];
- const ideal_capacity = sym.st_size * alloc_num / alloc_den;
- const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
- const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
- // Set up the metadata to be updated, after errors are no longer possible.
- block_placement = last;
- break :blk new_start_vaddr;
- } else {
- break :blk phdr.p_vaddr;
}
- };
- const expand_text_section = block_placement == null or block_placement.?.next == null;
- if (expand_text_section) {
- const text_capacity = self.allocatedSize(shdr.sh_offset);
- const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
- if (needed_size > text_capacity) {
- // Must move the entire text section.
- const new_offset = self.findFreeSpace(needed_size, 0x1000);
- const text_size = if (self.last_text_block) |last| blk: {
- const sym = self.local_symbols.items[last.local_sym_index];
- break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
- } else 0;
- const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, text_size);
- if (amt != text_size) return error.InputOutput;
- shdr.sh_offset = new_offset;
- phdr.p_offset = new_offset;
+ if (self.last_text_block == text_block) {
+ // TODO shrink the .text section size here
+ self.last_text_block = text_block.prev;
}
- self.last_text_block = text_block;
- shdr.sh_size = needed_size;
- phdr.p_memsz = needed_size;
- phdr.p_filesz = needed_size;
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
- self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
- self.shdr_table_dirty = true; // TODO look into making only the one section dirty
- }
+ if (!already_have_free_list_node and prev.freeListEligible(self.*)) {
+                // The free list is a heuristic; it doesn't have to be perfect, so we
+                // can ignore the OOM here.
+ self.text_block_free_list.append(self.allocator, prev) catch {};
+ }
+ } else {
+ text_block.prev = null;
+ }
- // This function can also reallocate a text block.
- // In this case we need to "unplug" it from its previous location before
- // plugging it in to its new location.
- if (text_block.prev) |prev| {
- prev.next = text_block.next;
- }
- if (text_block.next) |next| {
- next.prev = text_block.prev;
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ } else {
+ text_block.next = null;
+ }
}
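
For context, the rule behind `freeListEligible` (used above) reduces to a surplus check; a minimal sketch, with `min_text_capacity` standing in for the constant declared earlier in this file (value assumed here):

    const assert = @import("std").debug.assert;

    // Assumed stand-in for the real min_text_capacity constant.
    const min_text_capacity = 64;

    // A block earns a free list node only when its capacity exceeds
    // its used size by at least min_text_capacity.
    fn surplusEligible(st_size: u64, capacity: u64) bool {
        return capacity - st_size >= min_text_capacity;
    }

    comptime {
        assert(surplusEligible(100, 200)); // 100 bytes of surplus
        assert(!surplusEligible(100, 130)); // only 30 bytes of surplus
    }
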
- if (block_placement) |big_block| {
- text_block.prev = big_block;
- text_block.next = big_block.next;
- big_block.next = text_block;
- } else {
- text_block.prev = null;
- text_block.next = null;
- }
- if (free_list_removal) |i| {
- _ = self.text_block_free_list.swapRemove(i);
+ fn shrinkTextBlock(self: *File.Elf, text_block: *TextBlock, new_block_size: u64) void {
+        // TODO check the new capacity; if the freed-up surplus crosses the
+        // free-list eligibility threshold, insert a free list node for this block.
}
- return vaddr;
- }
- pub fn allocateDeclIndexes(self: *ElfFile, decl: *Module.Decl) !void {
- if (decl.link.local_sym_index != 0) return;
-
- // Here we also ensure capacity for the free lists so that they can be appended to without fail.
- try self.local_symbols.ensureCapacity(self.allocator, self.local_symbols.items.len + 1);
- try self.local_symbol_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);
- try self.offset_table.ensureCapacity(self.allocator, self.offset_table.items.len + 1);
- try self.offset_table_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);
-
- if (self.local_symbol_free_list.popOrNull()) |i| {
- //std.log.debug(.link, "reusing symbol index {} for {}\n", .{i, decl.name});
- decl.link.local_sym_index = i;
- } else {
- //std.log.debug(.link, "allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name});
- decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len);
- _ = self.local_symbols.addOneAssumeCapacity();
+ fn growTextBlock(self: *File.Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const sym = self.local_symbols.items[text_block.local_sym_index];
+ const align_ok = mem.alignBackwardGeneric(u64, sym.st_value, alignment) == sym.st_value;
+ const need_realloc = !align_ok or new_block_size > text_block.capacity(self.*);
+ if (!need_realloc) return sym.st_value;
+ return self.allocateTextBlock(text_block, new_block_size, alignment);
}
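
The `align_ok` test above is compact: a value is aligned exactly when rounding it down to the alignment leaves it unchanged. Spelled out:

    const mem = @import("std").mem;

    // Equivalent to the align_ok computation in growTextBlock:
    // aligning backward is a no-op iff vaddr is already aligned.
    fn isAligned(vaddr: u64, alignment: u64) bool {
        return mem.alignBackwardGeneric(u64, vaddr, alignment) == vaddr;
    }
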
- if (self.offset_table_free_list.popOrNull()) |i| {
- decl.link.offset_table_index = i;
- } else {
- decl.link.offset_table_index = @intCast(u32, self.offset_table.items.len);
- _ = self.offset_table.addOneAssumeCapacity();
- self.offset_table_count_dirty = true;
- }
+ fn allocateTextBlock(self: *File.Elf, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
+ const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ const shdr = &self.sections.items[self.text_section_index.?];
+ const new_block_ideal_capacity = new_block_size * alloc_num / alloc_den;
+
+ // We use these to indicate our intention to update metadata, placing the new block,
+ // and possibly removing a free list node.
+        // It would be simpler to do this inside the loop below, but that would cause a
+        // problem if an error were returned later in the function. So these updates are
+        // deferred to the end of the function, when errors are no longer possible.
+ var block_placement: ?*TextBlock = null;
+ var free_list_removal: ?usize = null;
+
+ // First we look for an appropriately sized free list node.
+ // The list is unordered. We'll just take the first thing that works.
+ const vaddr = blk: {
+ var i: usize = 0;
+ while (i < self.text_block_free_list.items.len) {
+ const big_block = self.text_block_free_list.items[i];
+            // We now have a pointer to a live text block that has too much capacity.
+            // Is the surplus large enough to fit this new text block?
+ const sym = self.local_symbols.items[big_block.local_sym_index];
+ const capacity = big_block.capacity(self.*);
+ const ideal_capacity = capacity * alloc_num / alloc_den;
+ const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
+ const capacity_end_vaddr = sym.st_value + capacity;
+ const new_start_vaddr_unaligned = capacity_end_vaddr - new_block_ideal_capacity;
+ const new_start_vaddr = mem.alignBackwardGeneric(u64, new_start_vaddr_unaligned, alignment);
+ if (new_start_vaddr < ideal_capacity_end_vaddr) {
+ // Additional bookkeeping here to notice if this free list node
+ // should be deleted because the block that it points to has grown to take up
+ // more of the extra capacity.
+ if (!big_block.freeListEligible(self.*)) {
+ _ = self.text_block_free_list.swapRemove(i);
+ } else {
+ i += 1;
+ }
+ continue;
+ }
+                // At this point we know that we will place the new block here. The
+                // remaining question is whether enough capacity is left over to keep
+                // a free list node.
+ const remaining_capacity = new_start_vaddr - ideal_capacity_end_vaddr;
+ const keep_free_list_node = remaining_capacity >= min_text_capacity;
+
+ // Set up the metadata to be updated, after errors are no longer possible.
+ block_placement = big_block;
+ if (!keep_free_list_node) {
+ free_list_removal = i;
+ }
+ break :blk new_start_vaddr;
+ } else if (self.last_text_block) |last| {
+ const sym = self.local_symbols.items[last.local_sym_index];
+ const ideal_capacity = sym.st_size * alloc_num / alloc_den;
+ const ideal_capacity_end_vaddr = sym.st_value + ideal_capacity;
+ const new_start_vaddr = mem.alignForwardGeneric(u64, ideal_capacity_end_vaddr, alignment);
+ // Set up the metadata to be updated, after errors are no longer possible.
+ block_placement = last;
+ break :blk new_start_vaddr;
+ } else {
+ break :blk phdr.p_vaddr;
+ }
+ };
- const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+ const expand_text_section = block_placement == null or block_placement.?.next == null;
+ if (expand_text_section) {
+ const text_capacity = self.allocatedSize(shdr.sh_offset);
+ const needed_size = (vaddr + new_block_size) - phdr.p_vaddr;
+ if (needed_size > text_capacity) {
+ // Must move the entire text section.
+ const new_offset = self.findFreeSpace(needed_size, 0x1000);
+ const text_size = if (self.last_text_block) |last| blk: {
+ const sym = self.local_symbols.items[last.local_sym_index];
+ break :blk (sym.st_value + sym.st_size) - phdr.p_vaddr;
+ } else 0;
+ const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, text_size);
+ if (amt != text_size) return error.InputOutput;
+ shdr.sh_offset = new_offset;
+ phdr.p_offset = new_offset;
+ }
+ self.last_text_block = text_block;
- self.local_symbols.items[decl.link.local_sym_index] = .{
- .st_name = 0,
- .st_info = 0,
- .st_other = 0,
- .st_shndx = 0,
- .st_value = phdr.p_vaddr,
- .st_size = 0,
- };
- self.offset_table.items[decl.link.offset_table_index] = 0;
- }
+ shdr.sh_size = needed_size;
+ phdr.p_memsz = needed_size;
+ phdr.p_filesz = needed_size;
- pub fn freeDecl(self: *ElfFile, decl: *Module.Decl) void {
- self.freeTextBlock(&decl.link);
- if (decl.link.local_sym_index != 0) {
- self.local_symbol_free_list.appendAssumeCapacity(decl.link.local_sym_index);
- self.offset_table_free_list.appendAssumeCapacity(decl.link.offset_table_index);
+ self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
+ self.shdr_table_dirty = true; // TODO look into making only the one section dirty
+ }
- self.local_symbols.items[decl.link.local_sym_index].st_info = 0;
+ // This function can also reallocate a text block.
+ // In this case we need to "unplug" it from its previous location before
+ // plugging it in to its new location.
+ if (text_block.prev) |prev| {
+ prev.next = text_block.next;
+ }
+ if (text_block.next) |next| {
+ next.prev = text_block.prev;
+ }
- decl.link.local_sym_index = 0;
+ if (block_placement) |big_block| {
+ text_block.prev = big_block;
+ text_block.next = big_block.next;
+ big_block.next = text_block;
+ } else {
+ text_block.prev = null;
+ text_block.next = null;
+ }
+ if (free_list_removal) |i| {
+ _ = self.text_block_free_list.swapRemove(i);
+ }
+ return vaddr;
}
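
The padding arithmetic above (`size * alloc_num / alloc_den`) is what gives blocks room to grow in place before a move is forced. A toy check, assuming the 4/3 ratio the constants' names suggest (the real values are declared near the top of this file):

    const assert = @import("std").debug.assert;

    // Hypothetical constants mirroring alloc_num/alloc_den.
    const alloc_num = 4;
    const alloc_den = 3;

    fn idealCapacity(size: u64) u64 {
        return size * alloc_num / alloc_den;
    }

    comptime {
        // 96 bytes of code reserve 128 bytes of address space,
        // leaving 32 bytes of headroom for in-place growth.
        assert(idealCapacity(96) == 128);
    }
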
- }
- pub fn updateDecl(self: *ElfFile, module: *Module, decl: *Module.Decl) !void {
- var code_buffer = std.ArrayList(u8).init(self.allocator);
- defer code_buffer.deinit();
-
- const typed_value = decl.typed_value.most_recent.typed_value;
- const code = switch (try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer)) {
- .externally_managed => |x| x,
- .appended => code_buffer.items,
- .fail => |em| {
- decl.analysis = .codegen_failure;
- _ = try module.failed_decls.put(decl, em);
- return;
- },
- };
+ pub fn allocateDeclIndexes(self: *File.Elf, decl: *Module.Decl) !void {
+ if (decl.link.local_sym_index != 0) return;
- const required_alignment = typed_value.ty.abiAlignment(self.options.target);
+ // Here we also ensure capacity for the free lists so that they can be appended to without fail.
+ try self.local_symbols.ensureCapacity(self.allocator, self.local_symbols.items.len + 1);
+ try self.local_symbol_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);
+ try self.offset_table.ensureCapacity(self.allocator, self.offset_table.items.len + 1);
+ try self.offset_table_free_list.ensureCapacity(self.allocator, self.local_symbols.items.len);
- const stt_bits: u8 = switch (typed_value.ty.zigTypeTag()) {
- .Fn => elf.STT_FUNC,
- else => elf.STT_OBJECT,
- };
+ if (self.local_symbol_free_list.popOrNull()) |i| {
+ //std.log.debug(.link, "reusing symbol index {} for {}\n", .{i, decl.name});
+ decl.link.local_sym_index = i;
+ } else {
+ //std.log.debug(.link, "allocating symbol index {} for {}\n", .{self.local_symbols.items.len, decl.name});
+ decl.link.local_sym_index = @intCast(u32, self.local_symbols.items.len);
+ _ = self.local_symbols.addOneAssumeCapacity();
+ }
- assert(decl.link.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
- const local_sym = &self.local_symbols.items[decl.link.local_sym_index];
- if (local_sym.st_size != 0) {
- const capacity = decl.link.capacity(self.*);
- const need_realloc = code.len > capacity or
- !mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
- if (need_realloc) {
- const vaddr = try self.growTextBlock(&decl.link, code.len, required_alignment);
- //std.log.debug(.link, "growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
- if (vaddr != local_sym.st_value) {
- local_sym.st_value = vaddr;
-
- //std.log.debug(.link, " (writing new offset table entry)\n", .{});
- self.offset_table.items[decl.link.offset_table_index] = vaddr;
- try self.writeOffsetTableEntry(decl.link.offset_table_index);
- }
- } else if (code.len < local_sym.st_size) {
- self.shrinkTextBlock(&decl.link, code.len);
+ if (self.offset_table_free_list.popOrNull()) |i| {
+ decl.link.offset_table_index = i;
+ } else {
+ decl.link.offset_table_index = @intCast(u32, self.offset_table.items.len);
+ _ = self.offset_table.addOneAssumeCapacity();
+ self.offset_table_count_dirty = true;
}
- local_sym.st_size = code.len;
- local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(decl.name));
- local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
- local_sym.st_other = 0;
- local_sym.st_shndx = self.text_section_index.?;
- // TODO this write could be avoided if no fields of the symbol were changed.
- try self.writeSymbol(decl.link.local_sym_index);
- } else {
- const decl_name = mem.spanZ(decl.name);
- const name_str_index = try self.makeString(decl_name);
- const vaddr = try self.allocateTextBlock(&decl.link, code.len, required_alignment);
- //std.log.debug(.link, "allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
- errdefer self.freeTextBlock(&decl.link);
-
- local_sym.* = .{
- .st_name = name_str_index,
- .st_info = (elf.STB_LOCAL << 4) | stt_bits,
+
+ const phdr = &self.program_headers.items[self.phdr_load_re_index.?];
+
+ self.local_symbols.items[decl.link.local_sym_index] = .{
+ .st_name = 0,
+ .st_info = 0,
.st_other = 0,
- .st_shndx = self.text_section_index.?,
- .st_value = vaddr,
- .st_size = code.len,
+ .st_shndx = 0,
+ .st_value = phdr.p_vaddr,
+ .st_size = 0,
};
- self.offset_table.items[decl.link.offset_table_index] = vaddr;
-
- try self.writeSymbol(decl.link.local_sym_index);
- try self.writeOffsetTableEntry(decl.link.offset_table_index);
+ self.offset_table.items[decl.link.offset_table_index] = 0;
}
- const section_offset = local_sym.st_value - self.program_headers.items[self.phdr_load_re_index.?].p_vaddr;
- const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
- try self.file.?.pwriteAll(code, file_offset);
+ pub fn freeDecl(self: *File.Elf, decl: *Module.Decl) void {
+ self.freeTextBlock(&decl.link);
+ if (decl.link.local_sym_index != 0) {
+ self.local_symbol_free_list.appendAssumeCapacity(decl.link.local_sym_index);
+ self.offset_table_free_list.appendAssumeCapacity(decl.link.offset_table_index);
- // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
- const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
- return self.updateDeclExports(module, decl, decl_exports);
- }
+ self.local_symbols.items[decl.link.local_sym_index].st_info = 0;
- /// Must be called only after a successful call to `updateDecl`.
- pub fn updateDeclExports(
- self: *ElfFile,
- module: *Module,
- decl: *const Module.Decl,
- exports: []const *Module.Export,
- ) !void {
- // In addition to ensuring capacity for global_symbols, we also ensure capacity for freeing all of
- // them, so that deleting exports is guaranteed to succeed.
- try self.global_symbols.ensureCapacity(self.allocator, self.global_symbols.items.len + exports.len);
- try self.global_symbol_free_list.ensureCapacity(self.allocator, self.global_symbols.items.len);
- const typed_value = decl.typed_value.most_recent.typed_value;
- if (decl.link.local_sym_index == 0) return;
- const decl_sym = self.local_symbols.items[decl.link.local_sym_index];
-
- for (exports) |exp| {
- if (exp.options.section) |section_name| {
- if (!mem.eql(u8, section_name, ".text")) {
- try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
- module.failed_exports.putAssumeCapacityNoClobber(
- exp,
- try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
- );
- continue;
- }
+ decl.link.local_sym_index = 0;
}
- const stb_bits: u8 = switch (exp.options.linkage) {
- .Internal => elf.STB_LOCAL,
- .Strong => blk: {
- if (mem.eql(u8, exp.options.name, "_start")) {
- self.entry_addr = decl_sym.st_value;
- }
- break :blk elf.STB_GLOBAL;
- },
- .Weak => elf.STB_WEAK,
- .LinkOnce => {
- try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
- module.failed_exports.putAssumeCapacityNoClobber(
- exp,
- try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
- );
- continue;
+ }
+
+ pub fn updateDecl(self: *File.Elf, module: *Module, decl: *Module.Decl) !void {
+ var code_buffer = std.ArrayList(u8).init(self.allocator);
+ defer code_buffer.deinit();
+
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ const code = switch (try codegen.generateSymbol(self, decl.src(), typed_value, &code_buffer)) {
+ .externally_managed => |x| x,
+ .appended => code_buffer.items,
+ .fail => |em| {
+ decl.analysis = .codegen_failure;
+ _ = try module.failed_decls.put(decl, em);
+ return;
},
};
- const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
- if (exp.link.sym_index) |i| {
- const sym = &self.global_symbols.items[i];
- sym.* = .{
- .st_name = try self.updateString(sym.st_name, exp.options.name),
- .st_info = (stb_bits << 4) | stt_bits,
- .st_other = 0,
- .st_shndx = self.text_section_index.?,
- .st_value = decl_sym.st_value,
- .st_size = decl_sym.st_size,
- };
+
+ const required_alignment = typed_value.ty.abiAlignment(self.options.target);
+
+ const stt_bits: u8 = switch (typed_value.ty.zigTypeTag()) {
+ .Fn => elf.STT_FUNC,
+ else => elf.STT_OBJECT,
+ };
+
+ assert(decl.link.local_sym_index != 0); // Caller forgot to allocateDeclIndexes()
+ const local_sym = &self.local_symbols.items[decl.link.local_sym_index];
+ if (local_sym.st_size != 0) {
+ const capacity = decl.link.capacity(self.*);
+ const need_realloc = code.len > capacity or
+ !mem.isAlignedGeneric(u64, local_sym.st_value, required_alignment);
+ if (need_realloc) {
+ const vaddr = try self.growTextBlock(&decl.link, code.len, required_alignment);
+ //std.log.debug(.link, "growing {} from 0x{x} to 0x{x}\n", .{ decl.name, local_sym.st_value, vaddr });
+ if (vaddr != local_sym.st_value) {
+ local_sym.st_value = vaddr;
+
+ //std.log.debug(.link, " (writing new offset table entry)\n", .{});
+ self.offset_table.items[decl.link.offset_table_index] = vaddr;
+ try self.writeOffsetTableEntry(decl.link.offset_table_index);
+ }
+ } else if (code.len < local_sym.st_size) {
+ self.shrinkTextBlock(&decl.link, code.len);
+ }
+ local_sym.st_size = code.len;
+ local_sym.st_name = try self.updateString(local_sym.st_name, mem.spanZ(decl.name));
+ local_sym.st_info = (elf.STB_LOCAL << 4) | stt_bits;
+ local_sym.st_other = 0;
+ local_sym.st_shndx = self.text_section_index.?;
+ // TODO this write could be avoided if no fields of the symbol were changed.
+ try self.writeSymbol(decl.link.local_sym_index);
} else {
- const name = try self.makeString(exp.options.name);
- const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
- _ = self.global_symbols.addOneAssumeCapacity();
- break :blk self.global_symbols.items.len - 1;
- };
- self.global_symbols.items[i] = .{
- .st_name = name,
- .st_info = (stb_bits << 4) | stt_bits,
+ const decl_name = mem.spanZ(decl.name);
+ const name_str_index = try self.makeString(decl_name);
+ const vaddr = try self.allocateTextBlock(&decl.link, code.len, required_alignment);
+ //std.log.debug(.link, "allocated text block for {} at 0x{x}\n", .{ decl_name, vaddr });
+ errdefer self.freeTextBlock(&decl.link);
+
+ local_sym.* = .{
+ .st_name = name_str_index,
+ .st_info = (elf.STB_LOCAL << 4) | stt_bits,
.st_other = 0,
.st_shndx = self.text_section_index.?,
- .st_value = decl_sym.st_value,
- .st_size = decl_sym.st_size,
+ .st_value = vaddr,
+ .st_size = code.len,
};
+ self.offset_table.items[decl.link.offset_table_index] = vaddr;
- exp.link.sym_index = @intCast(u32, i);
+ try self.writeSymbol(decl.link.local_sym_index);
+ try self.writeOffsetTableEntry(decl.link.offset_table_index);
}
- }
- }
- pub fn deleteExport(self: *ElfFile, exp: Export) void {
- const sym_index = exp.sym_index orelse return;
- self.global_symbol_free_list.appendAssumeCapacity(sym_index);
- self.global_symbols.items[sym_index].st_info = 0;
- }
+ const section_offset = local_sym.st_value - self.program_headers.items[self.phdr_load_re_index.?].p_vaddr;
+ const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset;
+ try self.file.?.pwriteAll(code, file_offset);
- fn writeProgHeader(self: *ElfFile, index: usize) !void {
- const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
- const offset = self.program_headers.items[index].p_offset;
- switch (self.options.target.cpu.arch.ptrBitWidth()) {
- 32 => {
- var phdr = [1]elf.Elf32_Phdr{progHeaderTo32(self.program_headers.items[index])};
- if (foreign_endian) {
- bswapAllFields(elf.Elf32_Phdr, &phdr[0]);
- }
- return self.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
- },
- 64 => {
- var phdr = [1]elf.Elf64_Phdr{self.program_headers.items[index]};
- if (foreign_endian) {
- bswapAllFields(elf.Elf64_Phdr, &phdr[0]);
- }
- return self.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
- },
- else => return error.UnsupportedArchitecture,
+ // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
+ return self.updateDeclExports(module, decl, decl_exports);
}
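
The update path above hinges on one predicate: reallocate when the new code no longer fits, or when the symbol's address violates the required alignment. Factored out for clarity (names are illustrative, not the real API):

    // Mirrors the need_realloc computation in updateDecl.
    fn needsRealloc(code_len: u64, capacity: u64, vaddr: u64, alignment: u64) bool {
        const misaligned = vaddr % alignment != 0;
        return code_len > capacity or misaligned;
    }
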
- }
- fn writeSectHeader(self: *ElfFile, index: usize) !void {
- const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
- const offset = self.sections.items[index].sh_offset;
- switch (self.options.target.cpu.arch.ptrBitWidth()) {
- 32 => {
- var shdr: [1]elf.Elf32_Shdr = undefined;
- shdr[0] = sectHeaderTo32(self.sections.items[index]);
- if (foreign_endian) {
- bswapAllFields(elf.Elf32_Shdr, &shdr[0]);
- }
- return self.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
- },
- 64 => {
- var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
- if (foreign_endian) {
- bswapAllFields(elf.Elf64_Shdr, &shdr[0]);
+ /// Must be called only after a successful call to `updateDecl`.
+ pub fn updateDeclExports(
+ self: *File.Elf,
+ module: *Module,
+ decl: *const Module.Decl,
+ exports: []const *Module.Export,
+ ) !void {
+ // In addition to ensuring capacity for global_symbols, we also ensure capacity for freeing all of
+ // them, so that deleting exports is guaranteed to succeed.
+ try self.global_symbols.ensureCapacity(self.allocator, self.global_symbols.items.len + exports.len);
+ try self.global_symbol_free_list.ensureCapacity(self.allocator, self.global_symbols.items.len);
+ const typed_value = decl.typed_value.most_recent.typed_value;
+ if (decl.link.local_sym_index == 0) return;
+ const decl_sym = self.local_symbols.items[decl.link.local_sym_index];
+
+ for (exports) |exp| {
+ if (exp.options.section) |section_name| {
+ if (!mem.eql(u8, section_name, ".text")) {
+ try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
+ );
+ continue;
+ }
}
- return self.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
- },
- else => return error.UnsupportedArchitecture,
- }
- }
+ const stb_bits: u8 = switch (exp.options.linkage) {
+ .Internal => elf.STB_LOCAL,
+ .Strong => blk: {
+ if (mem.eql(u8, exp.options.name, "_start")) {
+ self.entry_addr = decl_sym.st_value;
+ }
+ break :blk elf.STB_GLOBAL;
+ },
+ .Weak => elf.STB_WEAK,
+ .LinkOnce => {
+ try module.failed_exports.ensureCapacity(module.failed_exports.items().len + 1);
+ module.failed_exports.putAssumeCapacityNoClobber(
+ exp,
+ try Module.ErrorMsg.create(self.allocator, 0, "Unimplemented: GlobalLinkage.LinkOnce", .{}),
+ );
+ continue;
+ },
+ };
+ const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
+ if (exp.link.sym_index) |i| {
+ const sym = &self.global_symbols.items[i];
+ sym.* = .{
+ .st_name = try self.updateString(sym.st_name, exp.options.name),
+ .st_info = (stb_bits << 4) | stt_bits,
+ .st_other = 0,
+ .st_shndx = self.text_section_index.?,
+ .st_value = decl_sym.st_value,
+ .st_size = decl_sym.st_size,
+ };
+ } else {
+ const name = try self.makeString(exp.options.name);
+ const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
+ _ = self.global_symbols.addOneAssumeCapacity();
+ break :blk self.global_symbols.items.len - 1;
+ };
+ self.global_symbols.items[i] = .{
+ .st_name = name,
+ .st_info = (stb_bits << 4) | stt_bits,
+ .st_other = 0,
+ .st_shndx = self.text_section_index.?,
+ .st_value = decl_sym.st_value,
+ .st_size = decl_sym.st_size,
+ };
- fn writeOffsetTableEntry(self: *ElfFile, index: usize) !void {
- const shdr = &self.sections.items[self.got_section_index.?];
- const phdr = &self.program_headers.items[self.phdr_got_index.?];
- const entry_size: u16 = switch (self.ptr_width) {
- .p32 => 4,
- .p64 => 8,
- };
- if (self.offset_table_count_dirty) {
- // TODO Also detect virtual address collisions.
- const allocated_size = self.allocatedSize(shdr.sh_offset);
- const needed_size = self.local_symbols.items.len * entry_size;
- if (needed_size > allocated_size) {
- // Must move the entire got section.
- const new_offset = self.findFreeSpace(needed_size, entry_size);
- const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, shdr.sh_size);
- if (amt != shdr.sh_size) return error.InputOutput;
- shdr.sh_offset = new_offset;
- phdr.p_offset = new_offset;
+ exp.link.sym_index = @intCast(u32, i);
+ }
}
- shdr.sh_size = needed_size;
- phdr.p_memsz = needed_size;
- phdr.p_filesz = needed_size;
+ }
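
The `(stb_bits << 4) | stt_bits` packing above follows the standard ELF layout for `st_info`: binding in the high nibble, symbol type in the low nibble. A quick check:

    const assert = @import("std").debug.assert;

    // ELF st_info layout: high nibble = binding, low nibble = type.
    fn stInfo(binding: u8, ty: u8) u8 {
        return (binding << 4) | ty;
    }

    comptime {
        // STB_GLOBAL (1) function (STT_FUNC == 2) packs to 0x12.
        assert(stInfo(1, 2) == 0x12);
    }
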
- self.shdr_table_dirty = true; // TODO look into making only the one section dirty
- self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
+ pub fn deleteExport(self: *File.Elf, exp: Export) void {
+ const sym_index = exp.sym_index orelse return;
+ self.global_symbol_free_list.appendAssumeCapacity(sym_index);
+ self.global_symbols.items[sym_index].st_info = 0;
+ }
- self.offset_table_count_dirty = false;
+ fn writeProgHeader(self: *File.Elf, index: usize) !void {
+ const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
+ const offset = self.program_headers.items[index].p_offset;
+ switch (self.options.target.cpu.arch.ptrBitWidth()) {
+ 32 => {
+ var phdr = [1]elf.Elf32_Phdr{progHeaderTo32(self.program_headers.items[index])};
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf32_Phdr, &phdr[0]);
+ }
+ return self.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
+ },
+ 64 => {
+ var phdr = [1]elf.Elf64_Phdr{self.program_headers.items[index]};
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf64_Phdr, &phdr[0]);
+ }
+ return self.file.?.pwriteAll(mem.sliceAsBytes(&phdr), offset);
+ },
+ else => return error.UnsupportedArchitecture,
+ }
}
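
`progHeaderTo32` (defined elsewhere in this file) narrows each 64-bit header field for 32-bit targets; a sketch of its likely shape, assuming a field-by-field truncating cast:

    const elf = @import("std").elf;

    // Sketch of the 64-to-32 program header narrowing; each
    // @intCast asserts the value actually fits in 32 bits.
    fn progHeaderTo32Sketch(phdr: elf.Elf64_Phdr) elf.Elf32_Phdr {
        return .{
            .p_type = phdr.p_type,
            .p_flags = phdr.p_flags,
            .p_offset = @intCast(u32, phdr.p_offset),
            .p_vaddr = @intCast(u32, phdr.p_vaddr),
            .p_paddr = @intCast(u32, phdr.p_paddr),
            .p_filesz = @intCast(u32, phdr.p_filesz),
            .p_memsz = @intCast(u32, phdr.p_memsz),
            .p_align = @intCast(u32, phdr.p_align),
        };
    }
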
- const endian = self.options.target.cpu.arch.endian();
- const off = shdr.sh_offset + @as(u64, entry_size) * index;
- switch (self.ptr_width) {
- .p32 => {
- var buf: [4]u8 = undefined;
- mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
- try self.file.?.pwriteAll(&buf, off);
- },
- .p64 => {
- var buf: [8]u8 = undefined;
- mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
- try self.file.?.pwriteAll(&buf, off);
- },
+
+ fn writeSectHeader(self: *File.Elf, index: usize) !void {
+ const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
+ const offset = self.sections.items[index].sh_offset;
+ switch (self.options.target.cpu.arch.ptrBitWidth()) {
+ 32 => {
+ var shdr: [1]elf.Elf32_Shdr = undefined;
+ shdr[0] = sectHeaderTo32(self.sections.items[index]);
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf32_Shdr, &shdr[0]);
+ }
+ return self.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
+ },
+ 64 => {
+ var shdr = [1]elf.Elf64_Shdr{self.sections.items[index]};
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf64_Shdr, &shdr[0]);
+ }
+ return self.file.?.pwriteAll(mem.sliceAsBytes(&shdr), offset);
+ },
+ else => return error.UnsupportedArchitecture,
+ }
}
- }
- fn writeSymbol(self: *ElfFile, index: usize) !void {
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
- // Make sure we are not pointlessly writing symbol data that will have to get relocated
- // due to running out of space.
- if (self.local_symbols.items.len != syms_sect.sh_info) {
- const sym_size: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Sym),
- .p64 => @sizeOf(elf.Elf64_Sym),
- };
- const sym_align: u16 = switch (self.ptr_width) {
- .p32 => @alignOf(elf.Elf32_Sym),
- .p64 => @alignOf(elf.Elf64_Sym),
+ fn writeOffsetTableEntry(self: *File.Elf, index: usize) !void {
+ const shdr = &self.sections.items[self.got_section_index.?];
+ const phdr = &self.program_headers.items[self.phdr_got_index.?];
+ const entry_size: u16 = switch (self.ptr_width) {
+ .p32 => 4,
+ .p64 => 8,
};
- const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
- if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
- // Move all the symbols to a new file location.
- const new_offset = self.findFreeSpace(needed_size, sym_align);
- const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
- const amt = try self.file.?.copyRangeAll(syms_sect.sh_offset, self.file.?, new_offset, existing_size);
- if (amt != existing_size) return error.InputOutput;
- syms_sect.sh_offset = new_offset;
+ if (self.offset_table_count_dirty) {
+ // TODO Also detect virtual address collisions.
+ const allocated_size = self.allocatedSize(shdr.sh_offset);
+ const needed_size = self.local_symbols.items.len * entry_size;
+ if (needed_size > allocated_size) {
+ // Must move the entire got section.
+ const new_offset = self.findFreeSpace(needed_size, entry_size);
+ const amt = try self.file.?.copyRangeAll(shdr.sh_offset, self.file.?, new_offset, shdr.sh_size);
+ if (amt != shdr.sh_size) return error.InputOutput;
+ shdr.sh_offset = new_offset;
+ phdr.p_offset = new_offset;
+ }
+ shdr.sh_size = needed_size;
+ phdr.p_memsz = needed_size;
+ phdr.p_filesz = needed_size;
+
+ self.shdr_table_dirty = true; // TODO look into making only the one section dirty
+ self.phdr_table_dirty = true; // TODO look into making only the one program header dirty
+
+ self.offset_table_count_dirty = false;
+ }
+ const endian = self.options.target.cpu.arch.endian();
+ const off = shdr.sh_offset + @as(u64, entry_size) * index;
+ switch (self.ptr_width) {
+ .p32 => {
+ var buf: [4]u8 = undefined;
+ mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
+ try self.file.?.pwriteAll(&buf, off);
+ },
+ .p64 => {
+ var buf: [8]u8 = undefined;
+ mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
+ try self.file.?.pwriteAll(&buf, off);
+ },
}
- syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len);
- syms_sect.sh_size = needed_size; // anticipating adding the global symbols later
- self.shdr_table_dirty = true; // TODO look into only writing one section
}
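
Isolated from the file I/O, the per-entry work above is a multiply-add for the offset plus an endian-aware store; names below are illustrative:

    const std = @import("std");
    const mem = std.mem;
    const assert = std.debug.assert;

    // File offset of GOT entry `index`, as computed above.
    fn gotEntryOffset(sh_offset: u64, entry_size: u16, index: u64) u64 {
        return sh_offset + @as(u64, entry_size) * index;
    }

    // The pointer-sized value is serialized in the target's byte order.
    fn writeGotEntry(buf: *[8]u8, vaddr: u64, endian: std.builtin.Endian) void {
        mem.writeInt(u64, buf, vaddr, endian);
    }

    comptime {
        // Entry 3 of an 8-byte-entry GOT at file offset 0x2000.
        assert(gotEntryOffset(0x2000, 8, 3) == 0x2018);
    }
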
- const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
- switch (self.ptr_width) {
- .p32 => {
- var sym = [1]elf.Elf32_Sym{
- .{
- .st_name = self.local_symbols.items[index].st_name,
- .st_value = @intCast(u32, self.local_symbols.items[index].st_value),
- .st_size = @intCast(u32, self.local_symbols.items[index].st_size),
- .st_info = self.local_symbols.items[index].st_info,
- .st_other = self.local_symbols.items[index].st_other,
- .st_shndx = self.local_symbols.items[index].st_shndx,
- },
+
+ fn writeSymbol(self: *File.Elf, index: usize) !void {
+ const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ // Make sure we are not pointlessly writing symbol data that will have to get relocated
+ // due to running out of space.
+ if (self.local_symbols.items.len != syms_sect.sh_info) {
+ const sym_size: u64 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Sym),
+ .p64 => @sizeOf(elf.Elf64_Sym),
};
- if (foreign_endian) {
- bswapAllFields(elf.Elf32_Sym, &sym[0]);
- }
- const off = syms_sect.sh_offset + @sizeOf(elf.Elf32_Sym) * index;
- try self.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
- },
- .p64 => {
- var sym = [1]elf.Elf64_Sym{self.local_symbols.items[index]};
- if (foreign_endian) {
- bswapAllFields(elf.Elf64_Sym, &sym[0]);
+ const sym_align: u16 = switch (self.ptr_width) {
+ .p32 => @alignOf(elf.Elf32_Sym),
+ .p64 => @alignOf(elf.Elf64_Sym),
+ };
+ const needed_size = (self.local_symbols.items.len + self.global_symbols.items.len) * sym_size;
+ if (needed_size > self.allocatedSize(syms_sect.sh_offset)) {
+ // Move all the symbols to a new file location.
+ const new_offset = self.findFreeSpace(needed_size, sym_align);
+ const existing_size = @as(u64, syms_sect.sh_info) * sym_size;
+ const amt = try self.file.?.copyRangeAll(syms_sect.sh_offset, self.file.?, new_offset, existing_size);
+ if (amt != existing_size) return error.InputOutput;
+ syms_sect.sh_offset = new_offset;
}
- const off = syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index;
- try self.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
- },
- }
- }
-
- fn writeAllGlobalSymbols(self: *ElfFile) !void {
- const syms_sect = &self.sections.items[self.symtab_section_index.?];
- const sym_size: u64 = switch (self.ptr_width) {
- .p32 => @sizeOf(elf.Elf32_Sym),
- .p64 => @sizeOf(elf.Elf64_Sym),
- };
- const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
- const global_syms_off = syms_sect.sh_offset + self.local_symbols.items.len * sym_size;
- switch (self.ptr_width) {
- .p32 => {
- const buf = try self.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
- defer self.allocator.free(buf);
-
- for (buf) |*sym, i| {
- sym.* = .{
- .st_name = self.global_symbols.items[i].st_name,
- .st_value = @intCast(u32, self.global_symbols.items[i].st_value),
- .st_size = @intCast(u32, self.global_symbols.items[i].st_size),
- .st_info = self.global_symbols.items[i].st_info,
- .st_other = self.global_symbols.items[i].st_other,
- .st_shndx = self.global_symbols.items[i].st_shndx,
+ syms_sect.sh_info = @intCast(u32, self.local_symbols.items.len);
+ syms_sect.sh_size = needed_size; // anticipating adding the global symbols later
+ self.shdr_table_dirty = true; // TODO look into only writing one section
+ }
+ const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
+ switch (self.ptr_width) {
+ .p32 => {
+ var sym = [1]elf.Elf32_Sym{
+ .{
+ .st_name = self.local_symbols.items[index].st_name,
+ .st_value = @intCast(u32, self.local_symbols.items[index].st_value),
+ .st_size = @intCast(u32, self.local_symbols.items[index].st_size),
+ .st_info = self.local_symbols.items[index].st_info,
+ .st_other = self.local_symbols.items[index].st_other,
+ .st_shndx = self.local_symbols.items[index].st_shndx,
+ },
};
if (foreign_endian) {
- bswapAllFields(elf.Elf32_Sym, sym);
+ bswapAllFields(elf.Elf32_Sym, &sym[0]);
}
- }
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
- },
- .p64 => {
- const buf = try self.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
- defer self.allocator.free(buf);
-
- for (buf) |*sym, i| {
- sym.* = .{
- .st_name = self.global_symbols.items[i].st_name,
- .st_value = self.global_symbols.items[i].st_value,
- .st_size = self.global_symbols.items[i].st_size,
- .st_info = self.global_symbols.items[i].st_info,
- .st_other = self.global_symbols.items[i].st_other,
- .st_shndx = self.global_symbols.items[i].st_shndx,
- };
+ const off = syms_sect.sh_offset + @sizeOf(elf.Elf32_Sym) * index;
+ try self.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+ },
+ .p64 => {
+ var sym = [1]elf.Elf64_Sym{self.local_symbols.items[index]};
if (foreign_endian) {
- bswapAllFields(elf.Elf64_Sym, sym);
+ bswapAllFields(elf.Elf64_Sym, &sym[0]);
}
- }
- try self.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
- },
+ const off = syms_sect.sh_offset + @sizeOf(elf.Elf64_Sym) * index;
+ try self.file.?.pwriteAll(mem.sliceAsBytes(sym[0..1]), off);
+ },
+ }
}
- }
+
+ fn writeAllGlobalSymbols(self: *File.Elf) !void {
+ const syms_sect = &self.sections.items[self.symtab_section_index.?];
+ const sym_size: u64 = switch (self.ptr_width) {
+ .p32 => @sizeOf(elf.Elf32_Sym),
+ .p64 => @sizeOf(elf.Elf64_Sym),
+ };
+ const foreign_endian = self.options.target.cpu.arch.endian() != std.Target.current.cpu.arch.endian();
+ const global_syms_off = syms_sect.sh_offset + self.local_symbols.items.len * sym_size;
+ switch (self.ptr_width) {
+ .p32 => {
+ const buf = try self.allocator.alloc(elf.Elf32_Sym, self.global_symbols.items.len);
+ defer self.allocator.free(buf);
+
+ for (buf) |*sym, i| {
+ sym.* = .{
+ .st_name = self.global_symbols.items[i].st_name,
+ .st_value = @intCast(u32, self.global_symbols.items[i].st_value),
+ .st_size = @intCast(u32, self.global_symbols.items[i].st_size),
+ .st_info = self.global_symbols.items[i].st_info,
+ .st_other = self.global_symbols.items[i].st_other,
+ .st_shndx = self.global_symbols.items[i].st_shndx,
+ };
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf32_Sym, sym);
+ }
+ }
+ try self.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
+ },
+ .p64 => {
+ const buf = try self.allocator.alloc(elf.Elf64_Sym, self.global_symbols.items.len);
+ defer self.allocator.free(buf);
+
+ for (buf) |*sym, i| {
+ sym.* = .{
+ .st_name = self.global_symbols.items[i].st_name,
+ .st_value = self.global_symbols.items[i].st_value,
+ .st_size = self.global_symbols.items[i].st_size,
+ .st_info = self.global_symbols.items[i].st_info,
+ .st_other = self.global_symbols.items[i].st_other,
+ .st_shndx = self.global_symbols.items[i].st_shndx,
+ };
+ if (foreign_endian) {
+ bswapAllFields(elf.Elf64_Sym, sym);
+ }
+ }
+ try self.file.?.pwriteAll(mem.sliceAsBytes(buf), global_syms_off);
+ },
+ }
+ }
+ };
};
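
The `global_syms_off` computation in `writeAllGlobalSymbols` works because, as the symtab code above assumes, every local symbol is stored before the first global, so the globals start one multiply-add past the locals:

    const assert = @import("std").debug.assert;

    // Globals start immediately after the locals in .symtab.
    fn globalSymsOff(sh_offset: u64, n_locals: u64, sym_size: u64) u64 {
        return sh_offset + n_locals * sym_size;
    }

    comptime {
        // Five 24-byte Elf64_Sym locals after a table at offset 0x400.
        assert(globalSymsOff(0x400, 5, 24) == 0x478);
    }
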
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
-pub fn createElfFile(allocator: *Allocator, file: fs.File, options: Options) !ElfFile {
+pub fn createElfFile(allocator: *Allocator, file: fs.File, options: Options) !File.Elf {
switch (options.output_mode) {
.Exe => {},
.Obj => {},
@@ -1368,7 +1455,7 @@ pub fn createElfFile(allocator: *Allocator, file: fs.File, options: Options) !El
.wasm => return error.TODOImplementWritingWasmObjects,
}
- var self: ElfFile = .{
+ var self: File.Elf = .{
.allocator = allocator,
.file = file,
.options = options,
@@ -1412,7 +1499,7 @@ pub fn createElfFile(allocator: *Allocator, file: fs.File, options: Options) !El
}
/// Returns error.IncrFailed if incremental update could not be performed.
-fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !ElfFile {
+fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !File.Elf {
switch (options.output_mode) {
.Exe => {},
.Obj => {},
@@ -1425,7 +1512,7 @@ fn openBinFileInner(allocator: *Allocator, file: fs.File, options: Options) !Elf
.macho => return error.IncrFailed,
.wasm => return error.IncrFailed,
}
- var self: ElfFile = .{
+ var self: File.Elf = .{
.allocator = allocator,
.file = file,
.owns_file_handle = false,
src-self-hosted/Module.zig
@@ -26,7 +26,7 @@ root_pkg: *Package,
/// Module owns this resource.
/// The `Scope` is either a `Scope.ZIRModule` or `Scope.File`.
root_scope: *Scope,
-bin_file: link.ElfFile,
+bin_file: *link.File,
bin_file_dir: std.fs.Dir,
bin_file_path: []const u8,
/// It's rare for a decl to be exported, so we save memory by having a sparse map of
@@ -45,7 +45,7 @@ export_owners: std.AutoHashMap(*Decl, []*Export),
decl_table: DeclTable,
optimize_mode: std.builtin.Mode,
-link_error_flags: link.ElfFile.ErrorFlags = .{},
+link_error_flags: link.File.ErrorFlags = .{},
work_queue: std.fifo.LinearFifo(WorkItem, .Dynamic),
@@ -91,7 +91,7 @@ pub const Export = struct {
/// Byte offset into the file that contains the export directive.
src: usize,
/// Represents the position of the export, if any, in the output file.
- link: link.ElfFile.Export,
+ link: link.File.Elf.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: *Decl,
/// The Decl being exported. Note this is *not* the Decl performing the export.
@@ -169,7 +169,7 @@ pub const Decl = struct {
/// Represents the position of the code in the output file.
/// This is populated regardless of semantic analysis and code generation.
- link: link.ElfFile.TextBlock = link.ElfFile.TextBlock.empty,
+ link: link.File.Elf.TextBlock = link.File.Elf.TextBlock.empty,
contents_hash: std.zig.SrcHash,
@@ -722,6 +722,12 @@ pub const AllErrors = struct {
}
};
+pub const CStandard = enum {
+ C99,
+ GNU99,
+ C11,
+};
+
pub const InitOptions = struct {
target: std.Target,
root_pkg: *Package,
@@ -732,17 +738,19 @@ pub const InitOptions = struct {
object_format: ?std.builtin.ObjectFormat = null,
optimize_mode: std.builtin.Mode = .Debug,
keep_source_files_loaded: bool = false,
+ c_standard: ?CStandard = null,
};
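
With `c_standard` threaded through `InitOptions`, a caller opts into C output roughly like this (an illustrative call site; `gpa`, `root_pkg`, and `target` are assumed to come from the caller):

    // Hypothetical use of the new option.
    var module = try Module.init(gpa, .{
        .target = target,
        .root_pkg = root_pkg,
        .bin_file_path = "hello.c",
        .output_mode = .Obj,
        .c_standard = .C11,
    });
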
pub fn init(gpa: *Allocator, options: InitOptions) !Module {
const bin_file_dir = options.bin_file_dir orelse std.fs.cwd();
- var bin_file = try link.openBinFilePath(gpa, bin_file_dir, options.bin_file_path, .{
+ const bin_file = try link.openBinFilePath(gpa, bin_file_dir, options.bin_file_path, .{
.target = options.target,
.output_mode = options.output_mode,
.link_mode = options.link_mode orelse .Static,
.object_format = options.object_format orelse options.target.getObjectFormat(),
+ .c_standard = options.c_standard,
});
- errdefer bin_file.deinit();
+    errdefer bin_file.deinit();
const root_scope = blk: {
if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zig")) {
@@ -793,6 +801,7 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
pub fn deinit(self: *Module) void {
self.bin_file.deinit();
const allocator = self.allocator;
+ allocator.destroy(self.bin_file);
self.deletion_set.deinit(allocator);
self.work_queue.deinit();
@@ -840,7 +849,7 @@ fn freeExportList(allocator: *Allocator, export_list: []*Export) void {
}
pub fn target(self: Module) std.Target {
- return self.bin_file.options.target;
+ return self.bin_file.options().target;
}
/// Detect changes to source files, perform semantic analysis, and update the output files.
@@ -882,7 +891,7 @@ pub fn update(self: *Module) !void {
try self.deleteDecl(decl);
}
- self.link_error_flags = self.bin_file.error_flags;
+ self.link_error_flags = self.bin_file.errorFlags();
// If there are any errors, we anticipate the source files being loaded
// to report error messages. Otherwise we unload all source files to save memory.
@@ -1898,8 +1907,9 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
self.decl_exports.removeAssertDiscard(exp.exported_decl);
}
}
-
- self.bin_file.deleteExport(exp.link);
+ if (self.bin_file.cast(link.File.Elf)) |elf| {
+ elf.deleteExport(exp.link);
+ }
if (self.failed_exports.remove(exp)) |entry| {
entry.value.destroy(self.allocator);
}
@@ -1961,7 +1971,7 @@ fn allocateNewDecl(
.analysis = .unreferenced,
.deletion_flag = false,
.contents_hash = contents_hash,
- .link = link.ElfFile.TextBlock.empty,
+ .link = link.File.Elf.TextBlock.empty,
.generation = 0,
};
return new_decl;
@@ -2189,19 +2199,21 @@ fn analyzeExport(self: *Module, scope: *Scope, src: usize, symbol_name: []const
}
try self.symbol_exports.putNoClobber(symbol_name, new_export);
- self.bin_file.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
- error.OutOfMemory => return error.OutOfMemory,
- else => {
- try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1);
- self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
- self.allocator,
- src,
- "unable to export: {}",
- .{@errorName(err)},
- ));
- new_export.status = .failed_retryable;
- },
- };
+ if (self.bin_file.cast(link.File.Elf)) |elf| {
+ elf.updateDeclExports(self, exported_decl, de_gop.entry.value) catch |err| switch (err) {
+ error.OutOfMemory => return error.OutOfMemory,
+ else => {
+ try self.failed_exports.ensureCapacity(self.failed_exports.items().len + 1);
+ self.failed_exports.putAssumeCapacityNoClobber(new_export, try ErrorMsg.create(
+ self.allocator,
+ src,
+ "unable to export: {}",
+ .{@errorName(err)},
+ ));
+ new_export.status = .failed_retryable;
+ },
+ };
+ }
}
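
The `cast` calls above are the new dispatch point: `bin_file` is now a `*link.File`, and backend-specific operations are reached by downcasting. A sketch of the tag-plus-@fieldParentPtr pattern this implies (assumed, not quoted from the commit):

    /// Sketch: each backend embeds the base as a field named "base"
    /// and advertises its tag; cast checks the tag and recovers the
    /// containing struct, or returns null for a different backend.
    pub fn cast(base: *File, comptime T: type) ?*T {
        if (base.tag != T.base_tag) return null;
        return @fieldParentPtr(T, "base", base);
    }
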
fn addNewInstArgs(
src-self-hosted/test.zig
@@ -64,10 +64,11 @@ pub const TestContext = struct {
/// such as QEMU is required for tests to complete.
target: std.zig.CrossTarget,
/// In order to be able to run e.g. Execution updates, this must be set
- /// to Executable.
+ /// to Executable. This is ignored when generating C output.
output_mode: std.builtin.OutputMode,
updates: std.ArrayList(Update),
extension: TestType,
+ c_standard: ?Module.CStandard = null,
/// Adds a subcase in which the module is updated with `src`, and the
/// resulting ZIR is validated against `result`.
@@ -187,6 +188,22 @@ pub const TestContext = struct {
return ctx.addObj(name, target, .ZIR);
}
+ pub fn addC(ctx: *TestContext, name: []const u8, target: std.zig.CrossTarget, T: TestType, standard: Module.CStandard) *Case {
+ ctx.cases.append(Case{
+ .name = name,
+ .target = target,
+ .updates = std.ArrayList(Update).init(ctx.cases.allocator),
+ .output_mode = .Obj,
+ .extension = T,
+ .c_standard = standard,
+ }) catch unreachable;
+ return &ctx.cases.items[ctx.cases.items.len - 1];
+ }
+
+ pub fn c11(ctx: *TestContext, name: []const u8, target: std.zig.CrossTarget, src: [:0]const u8, c: [:0]const u8) void {
+ ctx.addC(name, target, .Zig, .C11).addTransform(src, c);
+ }
+
pub fn addCompareOutput(
ctx: *TestContext,
name: []const u8,
@@ -425,6 +442,7 @@ pub const TestContext = struct {
.bin_file_path = bin_name,
.root_pkg = root_pkg,
.keep_source_files_loaded = true,
+ .c_standard = case.c_standard,
});
defer module.deinit();
@@ -463,14 +481,15 @@ pub const TestContext = struct {
var test_node = update_node.start("assert", null);
test_node.activate();
defer test_node.end();
+ const label = if (case.c_standard) |_| "C" else "ZIR";
if (expected_output.len != out_zir.items.len) {
- std.debug.warn("{}\nTransformed ZIR length differs:\n================\nExpected:\n================\n{}\n================\nFound: {}\n================\nTest failed.\n", .{ case.name, expected_output, out_zir.items });
+ std.debug.warn("{}\nTransformed {} length differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ case.name, label, expected_output, out_zir.items });
std.process.exit(1);
}
for (expected_output) |e, i| {
if (out_zir.items[i] != e) {
if (expected_output.len != out_zir.items.len) {
- std.debug.warn("{}\nTransformed ZIR differs:\n================\nExpected:\n================\n{}\n================\nFound: {}\n================\nTest failed.\n", .{ case.name, expected_output, out_zir.items });
+ std.debug.warn("{}\nTransformed {} differs:\n================\nExpected:\n================\n{}\n================\nFound:\n================\n{}\n================\nTest failed.\n", .{ case.name, label, expected_output, out_zir.items });
std.process.exit(1);
}
}
test/stage2/cbe.zig
@@ -0,0 +1,18 @@
+const std = @import("std");
+const TestContext = @import("../../src-self-hosted/test.zig").TestContext;
+
+// These tests should work on all platforms, but we're using linux_x64 for
+// now for consistency. Will be expanded eventually.
+const linux_x64 = std.zig.CrossTarget{
+ .cpu_arch = .x86_64,
+ .os_tag = .linux,
+};
+
+pub fn addCases(ctx: *TestContext) !void {
+ // // These tests should work on every platform
+ // ctx.c11("empty start function", linux_x64,
+ // \\export fn start() void {}
+ // ,
+ // \\void start(void) {}
+ // );
+}
test/stage2/test.zig
@@ -4,4 +4,5 @@ pub fn addCases(ctx: *TestContext) !void {
try @import("compile_errors.zig").addCases(ctx);
try @import("compare_output.zig").addCases(ctx);
try @import("zir.zig").addCases(ctx);
+ try @import("cbe.zig").addCases(ctx);
}