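//! An `AtomList` is an ordered collection of atoms, all belonging to the same
//! output section, that are sized, allocated, and written to the output file
//! as one contiguous chunk. `value` is the list's offset from the start of
//! its output section; each atom's `value` is relative to the list until
//! `allocate` rebases it to the section.
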
value: i64 = 0,
size: u64 = 0,
alignment: Atom.Alignment = .@"1",
output_section_index: u32 = 0,
atoms: std.AutoArrayHashMapUnmanaged(Elf.Ref, void) = .empty,

dirty: bool = true,

pub fn deinit(list: *AtomList, allocator: Allocator) void {
    list.atoms.deinit(allocator);
}

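/// Returns the virtual address of the start of this list: the output
/// section's `sh_addr` plus the list's section-relative `value`.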
pub fn address(list: AtomList, elf_file: *Elf) i64 {
    const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
    return @as(i64, @intCast(shdr.sh_addr)) + list.value;
}

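/// Returns the file offset of the start of this list: the output section's
/// `sh_offset` plus the list's section-relative `value`.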
pub fn offset(list: AtomList, elf_file: *Elf) u64 {
    const shdr = elf_file.sections.items(.shdr)[list.output_section_index];
    return shdr.sh_offset + @as(u64, @intCast(list.value));
}

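/// Lays out the atoms back-to-back, honoring each atom's alignment: sets each
/// atom's `value` relative to the start of the list, and accumulates the
/// list's total `size` and maximum `alignment`.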
pub fn updateSize(list: *AtomList, elf_file: *Elf) void {
    assert(list.dirty);
    for (list.atoms.keys()) |ref| {
        const atom_ptr = elf_file.atom(ref).?;
        assert(atom_ptr.alive);
        const off = atom_ptr.alignment.forward(list.size);
        const padding = off - list.size;
        atom_ptr.value = @intCast(off);
        list.size += padding + atom_ptr.size;
        list.alignment = list.alignment.max(atom_ptr.alignment);
    }
}

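/// Allocates space for the whole list in its output section, splices the
/// list's atoms into the section's chain of prev/next atom links, and rebases
/// each atom's `value` from list-relative to section-relative.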
pub fn allocate(list: *AtomList, elf_file: *Elf) !void {
    assert(list.dirty);

    const alloc_res = try elf_file.allocateChunk(.{
        .shndx = list.output_section_index,
        .size = list.size,
        .alignment = list.alignment,
        .requires_padding = false,
    });
    list.value = @intCast(alloc_res.value);

    log.debug("allocated atom_list({d}) at 0x{x}", .{ list.output_section_index, list.address(elf_file) });

    const slice = elf_file.sections.slice();
    const shdr = &slice.items(.shdr)[list.output_section_index];
    const last_atom_ref = &slice.items(.last_atom)[list.output_section_index];

    const expand_section = if (elf_file.atom(alloc_res.placement)) |placement_atom|
        placement_atom.nextAtom(elf_file) == null
    else
        true;
    if (expand_section) last_atom_ref.* = list.lastAtom(elf_file).ref();
    shdr.sh_addralign = @max(shdr.sh_addralign, list.alignment.toByteUnits().?);

    // Link the atoms of this list into a doubly-linked chain.
    // This currently ignores Thunks as valid chunks.
    {
        const refs = list.atoms.keys();
        for (refs, 0..) |ref, i| {
            const atom_ptr = elf_file.atom(ref).?;
            if (i > 0) atom_ptr.prev_atom_ref = refs[i - 1];
            if (i + 1 < refs.len) atom_ptr.next_atom_ref = refs[i + 1];
        }
    }

    if (elf_file.atom(alloc_res.placement)) |placement_atom| {
        list.firstAtom(elf_file).prev_atom_ref = placement_atom.ref();
        list.lastAtom(elf_file).next_atom_ref = placement_atom.next_atom_ref;
        placement_atom.next_atom_ref = list.firstAtom(elf_file).ref();
    }

    // If we had a link from Atom to parent AtomList we would not need to
    // update Atom's value or osec index.
    for (list.atoms.keys()) |ref| {
        const atom_ptr = elf_file.atom(ref).?;
        atom_ptr.output_section_index = list.output_section_index;
        atom_ptr.value += list.value;
    }

    list.dirty = false;
}

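/// Writes every atom's code into `buffer`, resolving relocations in place,
/// then flushes the buffer to the output file at this list's file offset.
/// The list must have been sized and allocated first.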
pub fn write(list: AtomList, buffer: *std.Io.Writer.Allocating, undefs: anytype, elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    const osec = elf_file.sections.items(.shdr)[list.output_section_index];
    assert(osec.sh_type != elf.SHT_NOBITS);
    assert(!list.dirty);

    log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});

    const list_size = math.cast(usize, list.size) orelse return error.Overflow;
    try buffer.writer.splatByteAll(0, list_size);

    for (list.atoms.keys()) |ref| {
        const atom_ptr = elf_file.atom(ref).?;
        assert(atom_ptr.alive);

        const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
        const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;

        log.debug("  atom({f}) at 0x{x}", .{ ref, list.offset(elf_file) + off });

        const object = atom_ptr.file(elf_file).?.object;
        const code = try object.codeDecompressAlloc(elf_file, ref.index);
        defer gpa.free(code);
        const out_code = buffer.written()[off..][0..size];
        @memcpy(out_code, code);

        if (osec.sh_flags & elf.SHF_ALLOC == 0)
            try atom_ptr.resolveRelocsNonAlloc(elf_file, out_code, undefs)
        else
            try atom_ptr.resolveRelocsAlloc(elf_file, out_code);
    }

    try elf_file.base.file.?.pwriteAll(buffer.written(), list.offset(elf_file));
    buffer.clearRetainingCapacity();
}

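/// Like `write`, but copies each atom's code verbatim without resolving
/// relocations, as needed when emitting a relocatable object file.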
pub fn writeRelocatable(list: AtomList, buffer: *std.array_list.Managed(u8), elf_file: *Elf) !void {
    const gpa = elf_file.base.comp.gpa;
    const osec = elf_file.sections.items(.shdr)[list.output_section_index];
    assert(osec.sh_type != elf.SHT_NOBITS);

    log.debug("writing atoms in section '{s}'", .{elf_file.getShString(osec.sh_name)});

    const list_size = math.cast(usize, list.size) orelse return error.Overflow;
    try buffer.ensureUnusedCapacity(list_size);
    buffer.appendNTimesAssumeCapacity(0, list_size);

    for (list.atoms.keys()) |ref| {
        const atom_ptr = elf_file.atom(ref).?;
        assert(atom_ptr.alive);

        const off = math.cast(usize, atom_ptr.value - list.value) orelse return error.Overflow;
        const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;

        log.debug("  atom({f}) at 0x{x}", .{ ref, list.offset(elf_file) + off });

        const object = atom_ptr.file(elf_file).?.object;
        const code = try object.codeDecompressAlloc(elf_file, ref.index);
        defer gpa.free(code);
        const out_code = buffer.items[off..][0..size];
        @memcpy(out_code, code);
    }

    try elf_file.base.file.?.pwriteAll(buffer.items, list.offset(elf_file));
    buffer.clearRetainingCapacity();
}

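/// Returns the first atom in the list. Asserts the list is not empty.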
pub fn firstAtom(list: AtomList, elf_file: *Elf) *Atom {
    assert(list.atoms.keys().len > 0);
    return elf_file.atom(list.atoms.keys()[0]).?;
}

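/// Returns the last atom in the list. Asserts the list is not empty.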
pub fn lastAtom(list: AtomList, elf_file: *Elf) *Atom {
    assert(list.atoms.keys().len > 0);
    return elf_file.atom(list.atoms.keys()[list.atoms.keys().len - 1]).?;
}

const Format = struct {
    atom_list: AtomList,
    elf_file: *Elf,

    fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
        const list = f.atom_list;
        try writer.print("list : @{x} : shdr({d}) : align({x}) : size({x})", .{
            list.address(f.elf_file),
            list.output_section_index,
            list.alignment.toByteUnits() orelse 0,
            list.size,
        });
        try writer.writeAll(" : atoms{ ");
        for (list.atoms.keys(), 0..) |ref, i| {
            try writer.print("{f}", .{ref});
            if (i + 1 < list.atoms.keys().len) try writer.writeAll(", ");
        }
        try writer.writeAll(" }");
    }
};

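/// Returns a formatter that renders the list's address, output section,
/// alignment, size, and member atoms for debug output.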
pub fn fmt(atom_list: AtomList, elf_file: *Elf) std.fmt.Alt(Format, Format.default) {
    return .{ .data = .{ .atom_list = atom_list, .elf_file = elf_file } };
}

const assert = std.debug.assert;
const elf = std.elf;
const log = std.log.scoped(.link);
const math = std.math;
const std = @import("std");

const Allocator = std.mem.Allocator;
const Atom = @import("Atom.zig");
const AtomList = @This();
const Elf = @import("../Elf.zig");
const Object = @import("Object.zig");