Commit 7fead5d6dd
Changed files (4)
src/link/Elf/AtomList.zig
@@ -2,7 +2,8 @@ value: i64 = 0,
size: u64 = 0,
alignment: Atom.Alignment = .@"1",
output_section_index: u32 = 0,
-atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty,
+// atoms: std.ArrayListUnmanaged(Elf.Ref) = .empty,
+atoms: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .empty,
pub fn deinit(list: *AtomList, allocator: Allocator) void {
list.atoms.deinit(allocator);
@@ -22,7 +23,7 @@ pub fn updateSize(list: *AtomList, elf_file: *Elf) void {
// TODO perhaps a 'stale' flag would be better here?
list.size = 0;
list.alignment = .@"1";
- for (list.atoms.items) |ref| {
+ for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
const off = atom_ptr.alignment.forward(list.size);
@@ -56,13 +57,13 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
// FIXME:JK this currently ignores Thunks as valid chunks.
{
var idx: usize = 0;
- while (idx < list.atoms.items.len) : (idx += 1) {
- const curr_atom_ptr = elf_file.atom(list.atoms.items[idx]).?;
+ while (idx < list.atoms.keys().len) : (idx += 1) {
+ const curr_atom_ptr = elf_file.atom(list.atoms.keys()[idx]).?;
if (idx > 0) {
- curr_atom_ptr.prev_atom_ref = list.atoms.items[idx - 1];
+ curr_atom_ptr.prev_atom_ref = list.atoms.keys()[idx - 1];
}
- if (idx + 1 < list.atoms.items.len) {
- curr_atom_ptr.next_atom_ref = list.atoms.items[idx + 1];
+ if (idx + 1 < list.atoms.keys().len) {
+ curr_atom_ptr.next_atom_ref = list.atoms.keys()[idx + 1];
}
}
}
@@ -74,7 +75,7 @@ pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
}
// FIXME:JK if we had a link from Atom to parent AtomList we would not need to update Atom's value or osec index
- for (list.atoms.items) |ref| {
+ for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
atom_ptr.output_section_index = list.output_section_index;
atom_ptr.value += list.value;
@@ -92,7 +93,7 @@ pub fn write(list: AtomList, buffer: *std.ArrayList(u8), undefs: anytype, elf_fi
try buffer.ensureUnusedCapacity(list_size);
buffer.appendNTimesAssumeCapacity(0, list_size);
- for (list.atoms.items) |ref| {
+ for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
@@ -128,7 +129,7 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E
try buffer.ensureUnusedCapacity(list_size);
buffer.appendNTimesAssumeCapacity(0, list_size);
- for (list.atoms.items) |ref| {
+ for (list.atoms.keys()) |ref| {
const atom_ptr = elf_file.atom(ref).?;
assert(atom_ptr.alive);
@@ -149,13 +150,13 @@ pub fn writeRelocatable(list: AtomList, buffer: *std.ArrayList(u8), elf_file: *E
}
pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom {
- assert(list.atoms.items.len > 0);
- return elf_file.atom(list.atoms.items[0]).?;
+ assert(list.atoms.keys().len > 0);
+ return elf_file.atom(list.atoms.keys()[0]).?;
}
pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom {
- assert(list.atoms.items.len > 0);
- return elf_file.atom(list.atoms.items[list.atoms.items.len - 1]).?;
+ assert(list.atoms.keys().len > 0);
+ return elf_file.atom(list.atoms.keys()[list.atoms.keys().len - 1]).?;
}
pub fn format(
@@ -191,9 +192,9 @@ fn format2(
list.alignment.toByteUnits() orelse 0, list.size,
});
try writer.writeAll(" : atoms{ ");
- for (list.atoms.items, 0..) |ref, i| {
+ for (list.atoms.keys(), 0..) |ref, i| {
try writer.print("{}", .{ref});
- if (i < list.atoms.items.len - 1) try writer.writeAll(", ");
+ if (i < list.atoms.keys().len - 1) try writer.writeAll(", ");
}
try writer.writeAll(" }");
}
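
The core of the commit is the change of AtomList.atoms from std.ArrayListUnmanaged(Elf.Ref) to std.AutoArrayHashMapUnmanaged(Elf.Ref, void): with a void value type the array hash map behaves as an insertion-ordered set, so every .items access becomes .keys() and append becomes getOrPut, which silently ignores refs that are already present. A minimal standalone sketch of that behaviour, using a hypothetical Ref struct in place of Elf.Ref:

const std = @import("std");

// Hypothetical stand-in for Elf.Ref, only for this sketch.
const Ref = struct { index: u32, file: u32 };

test "atoms as an insertion-ordered set" {
    const gpa = std.testing.allocator;

    var atoms: std.AutoArrayHashMapUnmanaged(Ref, void) = .empty;
    defer atoms.deinit(gpa);

    // getOrPut only inserts when the key is absent, so duplicate refs collapse.
    _ = try atoms.getOrPut(gpa, .{ .index = 1, .file = 0 });
    _ = try atoms.getOrPut(gpa, .{ .index = 2, .file = 0 });
    _ = try atoms.getOrPut(gpa, .{ .index = 1, .file = 0 }); // duplicate, no-op

    // keys() replaces the old ArrayList's .items slice and preserves
    // insertion order, which the updateSize/allocate/write loops rely on.
    try std.testing.expectEqual(@as(usize, 2), atoms.keys().len);
    try std.testing.expectEqual(@as(u32, 1), atoms.keys()[0].index);
    try std.testing.expectEqual(@as(u32, 2), atoms.keys()[1].index);
}
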
src/link/Elf/Object.zig
@@ -915,7 +915,7 @@ pub fn initOutputSections(self: *Object, elf_file: *Elf) !void {
});
const atom_list = &elf_file.sections.items(.atom_list_2)[osec];
atom_list.output_section_index = osec;
- try atom_list.atoms.append(elf_file.base.comp.gpa, atom_ptr.ref());
+ _ = try atom_list.atoms.getOrPut(elf_file.base.comp.gpa, atom_ptr.ref());
}
}
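
In Object.initOutputSections the old append becomes getOrPut with the result discarded via "_ =", since only membership in the set matters at this point. For illustration, a hedged sketch of the same call with the result inspected; addAtomOnce is a hypothetical helper, not part of the commit:

const std = @import("std");

// Hypothetical Ref stand-in, as in the previous sketch.
const Ref = struct { index: u32, file: u32 };

// Returns true when the ref was newly inserted; found_existing marks the
// duplicates that the old ArrayList append would have kept.
fn addAtomOnce(
    gpa: std.mem.Allocator,
    atoms: *std.AutoArrayHashMapUnmanaged(Ref, void),
    ref: Ref,
) !bool {
    const gop = try atoms.getOrPut(gpa, ref);
    return !gop.found_existing;
}

test addAtomOnce {
    const gpa = std.testing.allocator;
    var atoms: std.AutoArrayHashMapUnmanaged(Ref, void) = .empty;
    defer atoms.deinit(gpa);
    try std.testing.expect(try addAtomOnce(gpa, &atoms, .{ .index = 7, .file = 1 }));
    try std.testing.expect(!try addAtomOnce(gpa, &atoms, .{ .index = 7, .file = 1 }));
}
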
src/link/Elf/relocatable.zig
@@ -335,7 +335,7 @@ fn initComdatGroups(elf_file: *Elf) !void {
fn updateSectionSizes(elf_file: *Elf) !void {
const slice = elf_file.sections.slice();
for (slice.items(.atom_list_2)) |*atom_list| {
- if (atom_list.atoms.items.len == 0) continue;
+ if (atom_list.atoms.keys().len == 0) continue;
atom_list.updateSize(elf_file);
try atom_list.allocate(elf_file);
}
@@ -434,7 +434,7 @@ fn writeAtoms(elf_file: *Elf) !void {
const slice = elf_file.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
- if (atom_list.atoms.items.len == 0) continue;
+ if (atom_list.atoms.keys().len == 0) continue;
try atom_list.writeRelocatable(&buffer, elf_file);
}
}
src/link/Elf.zig
@@ -3219,7 +3219,7 @@ fn sortInitFini(self: *Elf) !void {
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_ALLOC == 0) continue;
- if (atom_list.atoms.items.len == 0) continue;
+ if (atom_list.atoms.keys().len == 0) continue;
var is_init_fini = false;
var is_ctor_dtor = false;
@@ -3236,10 +3236,10 @@ fn sortInitFini(self: *Elf) !void {
if (!is_init_fini and !is_ctor_dtor) continue;
var entries = std.ArrayList(Entry).init(gpa);
- try entries.ensureTotalCapacityPrecise(atom_list.atoms.items.len);
+ try entries.ensureTotalCapacityPrecise(atom_list.atoms.keys().len);
defer entries.deinit();
- for (atom_list.atoms.items) |ref| {
+ for (atom_list.atoms.keys()) |ref| {
const atom_ptr = self.atom(ref).?;
const object = atom_ptr.file(self).?.object;
const priority = blk: {
@@ -3260,7 +3260,7 @@ fn sortInitFini(self: *Elf) !void {
atom_list.atoms.clearRetainingCapacity();
for (entries.items) |entry| {
- atom_list.atoms.appendAssumeCapacity(entry.atom_ref);
+ _ = atom_list.atoms.getOrPutAssumeCapacity(entry.atom_ref);
}
}
}
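
sortInitFini now empties the set with clearRetainingCapacity and re-inserts the refs in priority order with getOrPutAssumeCapacity; because the backing storage is retained by the clear, the re-insertions cannot allocate or fail. A minimal sketch of that reorder pattern, again with a hypothetical Ref and a plain index sort standing in for the real init/fini priority:

const std = @import("std");

// Hypothetical Ref stand-in; the real sort key is the ctor/dtor priority.
const Ref = struct { index: u32, file: u32 };

test "reorder an AutoArrayHashMap-backed set in place" {
    const gpa = std.testing.allocator;

    var atoms: std.AutoArrayHashMapUnmanaged(Ref, void) = .empty;
    defer atoms.deinit(gpa);
    _ = try atoms.getOrPut(gpa, .{ .index = 3, .file = 0 });
    _ = try atoms.getOrPut(gpa, .{ .index = 1, .file = 0 });
    _ = try atoms.getOrPut(gpa, .{ .index = 2, .file = 0 });

    // Snapshot the keys, sort the snapshot, then rebuild the set in the new
    // order. clearRetainingCapacity keeps the entries' capacity, so
    // getOrPutAssumeCapacity below is guaranteed to have room.
    const sorted = try gpa.dupe(Ref, atoms.keys());
    defer gpa.free(sorted);
    std.mem.sort(Ref, sorted, {}, struct {
        fn lessThan(_: void, a: Ref, b: Ref) bool {
            return a.index < b.index;
        }
    }.lessThan);

    atoms.clearRetainingCapacity();
    for (sorted) |ref| _ = atoms.getOrPutAssumeCapacity(ref);

    try std.testing.expectEqual(@as(u32, 1), atoms.keys()[0].index);
    try std.testing.expectEqual(@as(u32, 3), atoms.keys()[2].index);
}
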
@@ -3506,7 +3506,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list_2)) |*shdr, *atom_list| {
atom_list.output_section_index = backlinks[atom_list.output_section_index];
- for (atom_list.atoms.items) |ref| {
+ for (atom_list.atoms.keys()) |ref| {
self.atom(ref).?.output_section_index = atom_list.output_section_index;
}
if (shdr.sh_type == elf.SHT_RELA) {
@@ -3585,7 +3585,7 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) void {
fn updateSectionSizes(self: *Elf) !void {
const slice = self.sections.slice();
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
- if (atom_list.atoms.items.len == 0) continue;
+ if (atom_list.atoms.keys().len == 0) continue;
if (self.requiresThunks() and shdr.sh_flags & elf.SHF_EXECINSTR != 0) continue;
atom_list.updateSize(self);
try atom_list.allocate(self);
@@ -3594,7 +3594,7 @@ fn updateSectionSizes(self: *Elf) !void {
if (self.requiresThunks()) {
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, *atom_list| {
if (shdr.sh_flags & elf.SHF_EXECINSTR == 0) continue;
- if (atom_list.atoms.items.len == 0) continue;
+ if (atom_list.atoms.keys().len == 0) continue;
// Create jump/branch range extenders if needed.
try self.createThunks(atom_list);
@@ -4058,7 +4058,7 @@ fn writeAtoms(self: *Elf) !void {
var has_reloc_errors = false;
for (slice.items(.shdr), slice.items(.atom_list_2)) |shdr, atom_list| {
if (shdr.sh_type == elf.SHT_NOBITS) continue;
- if (atom_list.atoms.items.len == 0) continue;
+ if (atom_list.atoms.keys().len == 0) continue;
atom_list.write(&buffer, &undefs, self) catch |err| switch (err) {
error.UnsupportedCpuArch => {
try self.reportUnsupportedCpuArch();
@@ -5732,20 +5732,20 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
}
}.advance;
- for (atom_list.atoms.items) |ref| {
+ for (atom_list.atoms.keys()) |ref| {
elf_file.atom(ref).?.value = -1;
}
var i: usize = 0;
- while (i < atom_list.atoms.items.len) {
+ while (i < atom_list.atoms.keys().len) {
const start = i;
- const start_atom = elf_file.atom(atom_list.atoms.items[start]).?;
+ const start_atom = elf_file.atom(atom_list.atoms.keys()[start]).?;
assert(start_atom.alive);
start_atom.value = try advance(atom_list, start_atom.size, start_atom.alignment);
i += 1;
- while (i < atom_list.atoms.items.len) : (i += 1) {
- const atom_ptr = elf_file.atom(atom_list.atoms.items[i]).?;
+ while (i < atom_list.atoms.keys().len) : (i += 1) {
+ const atom_ptr = elf_file.atom(atom_list.atoms.keys()[i]).?;
assert(atom_ptr.alive);
if (@as(i64, @intCast(atom_ptr.alignment.forward(atom_list.size))) - start_atom.value >= max_distance)
break;
@@ -5758,7 +5758,7 @@ fn createThunks(elf_file: *Elf, atom_list: *AtomList) !void {
thunk_ptr.output_section_index = atom_list.output_section_index;
// Scan relocs in the group and create trampolines for any unreachable callsite
- for (atom_list.atoms.items[start..i]) |ref| {
+ for (atom_list.atoms.keys()[start..i]) |ref| {
const atom_ptr = elf_file.atom(ref).?;
const file_ptr = atom_ptr.file(elf_file).?;
log.debug("atom({}) {s}", .{ ref, atom_ptr.name(elf_file) });
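
One caveat that applies throughout: keys() returns a slice into the map's backing storage, so it is only valid until the set is modified. The loops in allocate and createThunks re-call keys() freely because nothing is inserted while iterating; after any insertion the slice must be re-fetched. A small sketch of that rule, under the same hypothetical Ref:

const std = @import("std");

const Ref = struct { index: u32, file: u32 };

test "keys() is invalidated by insertion" {
    const gpa = std.testing.allocator;
    var atoms: std.AutoArrayHashMapUnmanaged(Ref, void) = .empty;
    defer atoms.deinit(gpa);
    _ = try atoms.getOrPut(gpa, .{ .index = 1, .file = 0 });

    // Fine while the set is not mutated: the slice reflects current contents.
    const before = atoms.keys();
    try std.testing.expectEqual(@as(usize, 1), before.len);

    // An insertion may reallocate the backing storage, so the old slice must
    // not be used afterwards; call keys() again instead.
    _ = try atoms.getOrPut(gpa, .{ .index = 2, .file = 0 });
    try std.testing.expectEqual(@as(usize, 2), atoms.keys().len);
}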