//! Merge sections (SHF_MERGE) for the ELF linker: deduplicated strings/constants.
/// A merge section: deduplicates identical strings/constants from input
/// object files into unique `Subsection`s, which are then laid out inside
/// one output section.
pub const Section = struct {
    /// Offset of this merge section within its output section
    /// (see `address`).
    value: u64 = 0,
    /// Total size in bytes; recomputed by `updateSize`.
    size: u64 = 0,
    /// Maximum alignment over all live subsections; recomputed by `updateSize`.
    alignment: Atom.Alignment = .@"1",
    /// Minimum non-zero entsize over all live subsections; recomputed by `updateSize`.
    entsize: u32 = 0,
    /// Offset of the section name in the section header string table.
    name_offset: u32 = 0,
    /// ELF sh_type value for the output section.
    type: u32 = 0,
    /// ELF sh_flags value for the output section.
    flags: u64 = 0,
    output_section_index: u32 = 0,
    /// Backing byte storage for all interned strings/constants.
    bytes: std.ArrayList(u8) = .empty,
    /// Maps interned contents (a pos/len pair into `bytes`) to a subsection
    /// index. Cleared by `finalize`.
    table: std.HashMapUnmanaged(
        String,
        Subsection.Index,
        IndexContext,
        std.hash_map.default_max_load_percentage,
    ) = .{},
    subsections: std.ArrayList(Subsection) = .empty,
    /// Indices of live subsections in final sorted order; populated by `finalize`.
    finalized_subsections: std.ArrayList(Subsection.Index) = .empty,

    pub fn deinit(msec: *Section, allocator: Allocator) void {
        msec.bytes.deinit(allocator);
        msec.table.deinit(allocator);
        msec.subsections.deinit(allocator);
        msec.finalized_subsections.deinit(allocator);
    }

    /// Returns the section name from the output file's shstrtab.
    pub fn name(msec: Section, elf_file: *Elf) [:0]const u8 {
        return elf_file.getShString(msec.name_offset);
    }

    /// Virtual address of this merge section: output section address plus
    /// this section's offset within it.
    pub fn address(msec: Section, elf_file: *Elf) i64 {
        const shdr = elf_file.sections.items(.shdr)[msec.output_section_index];
        return @intCast(shdr.sh_addr + msec.value);
    }

    const InsertResult = struct {
        found_existing: bool,
        key: String,
        /// Pointer into `table`; invalidated by any further table mutation.
        sub: *Subsection.Index,
    };

    /// Interns `string`, deduplicating identical contents. On a fresh
    /// insertion the contents are appended to `bytes` and `sub` is left
    /// for the caller to fill in with a subsection index.
    pub fn insert(msec: *Section, allocator: Allocator, string: []const u8) !InsertResult {
        const gop = try msec.table.getOrPutContextAdapted(
            allocator,
            string,
            IndexAdapter{ .bytes = msec.bytes.items },
            IndexContext{ .bytes = msec.bytes.items },
        );
        if (!gop.found_existing) {
            const index: u32 = @intCast(msec.bytes.items.len);
            try msec.bytes.appendSlice(allocator, string);
            gop.key_ptr.* = .{ .pos = index, .len = @intCast(string.len) };
        }
        return .{ .found_existing = gop.found_existing, .key = gop.key_ptr.*, .sub = gop.value_ptr };
    }

    /// Like `insert` but interns `string` with a trailing NUL appended.
    pub fn insertZ(msec: *Section, allocator: Allocator, string: []const u8) !InsertResult {
        const with_null = try allocator.alloc(u8, string.len + 1);
        defer allocator.free(with_null);
        @memcpy(with_null[0..string.len], string);
        with_null[string.len] = 0;
        return msec.insert(allocator, with_null);
    }

    /// Finalizes the merge section and clears hash table.
    /// Only subsections marked alive are kept. Sorts all owned subsections
    /// by alignment, then size, then contents.
    pub fn finalize(msec: *Section, allocator: Allocator) !void {
        try msec.finalized_subsections.ensureTotalCapacityPrecise(allocator, msec.subsections.items.len);

        var it = msec.table.iterator();
        while (it.next()) |entry| {
            const msub = msec.mergeSubsection(entry.value_ptr.*);
            if (!msub.alive) continue;
            msec.finalized_subsections.appendAssumeCapacity(entry.value_ptr.*);
        }
        msec.table.clearAndFree(allocator);

        const sortFn = struct {
            pub fn sortFn(ctx: *Section, lhs: Subsection.Index, rhs: Subsection.Index) bool {
                const lhs_msub = ctx.mergeSubsection(lhs);
                const rhs_msub = ctx.mergeSubsection(rhs);
                if (lhs_msub.alignment.compareStrict(.eq, rhs_msub.alignment)) {
                    if (lhs_msub.size == rhs_msub.size) {
                        // Equal alignment and size: order by contents for determinism.
                        const lhs_string = ctx.bytes.items[lhs_msub.string_index..][0..lhs_msub.size];
                        const rhs_string = ctx.bytes.items[rhs_msub.string_index..][0..rhs_msub.size];
                        return mem.order(u8, lhs_string, rhs_string) == .lt;
                    }
                    return lhs_msub.size < rhs_msub.size;
                }
                return lhs_msub.alignment.compareStrict(.lt, rhs_msub.alignment);
            }
        }.sortFn;

        std.mem.sort(Subsection.Index, msec.finalized_subsections.items, msec, sortFn);
    }

    /// Recomputes `size`, `alignment` and `entsize`, and assigns each
    /// finalized subsection its offset (`value`) within this merge section,
    /// honoring per-subsection alignment padding.
    pub fn updateSize(msec: *Section) void {
        // TODO a 'stale' flag would be better here perhaps?
        msec.size = 0;
        msec.alignment = .@"1";
        msec.entsize = 0;
        for (msec.finalized_subsections.items) |msub_index| {
            const msub = msec.mergeSubsection(msub_index);
            assert(msub.alive);
            const offset = msub.alignment.forward(msec.size);
            const padding = offset - msec.size;
            msub.value = @intCast(offset);
            msec.size += padding + msub.size;
            msec.alignment = msec.alignment.max(msub.alignment);
            msec.entsize = if (msec.entsize == 0) msub.entsize else @min(msec.entsize, msub.entsize);
        }
    }

    /// Resolves the output section by name, creating it on first use.
    pub fn initOutputSection(msec: *Section, elf_file: *Elf) !void {
        msec.output_section_index = elf_file.sectionByName(msec.name(elf_file)) orelse try elf_file.addSection(.{
            .name = msec.name_offset,
            .type = msec.type,
            .flags = msec.flags,
        });
    }

    /// Appends a new default-initialized subsection and returns its index.
    pub fn addMergeSubsection(msec: *Section, allocator: Allocator) !Subsection.Index {
        const index: Subsection.Index = @intCast(msec.subsections.items.len);
        const msub = try msec.subsections.addOne(allocator);
        msub.* = .{};
        return index;
    }

    /// Returns a pointer into `subsections`; invalidated when the list grows.
    pub fn mergeSubsection(msec: *Section, index: Subsection.Index) *Subsection {
        assert(index < msec.subsections.items.len);
        return &msec.subsections.items[index];
    }

    /// Hash map context: hashes a `String` key by the bytes it refers to.
    /// Keys are interned, so positional equality implies content equality.
    pub const IndexContext = struct {
        bytes: []const u8,

        pub fn eql(_: @This(), a: String, b: String) bool {
            return a.pos == b.pos;
        }

        pub fn hash(ctx: @This(), key: String) u64 {
            const str = ctx.bytes[key.pos..][0..key.len];
            return std.hash_map.hashString(str);
        }
    };

    /// Adapter enabling lookups in `table` by raw `[]const u8` contents.
    /// Must hash identically to `IndexContext`.
    pub const IndexAdapter = struct {
        bytes: []const u8,

        pub fn eql(ctx: @This(), a: []const u8, b: String) bool {
            const str = ctx.bytes[b.pos..][0..b.len];
            return mem.eql(u8, a, str);
        }

        pub fn hash(_: @This(), adapted_key: []const u8) u64 {
            return std.hash_map.hashString(adapted_key);
        }
    };

    pub fn fmt(msec: Section, elf_file: *Elf) std.fmt.Alt(Format, Format.default) {
        return .{ .data = .{
            .msec = msec,
            .elf_file = elf_file,
        } };
    }

    const Format = struct {
        msec: Section,
        elf_file: *Elf,

        pub fn default(f: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
            const msec = f.msec;
            const elf_file = f.elf_file;
            try writer.print("{s} : @{x} : size({x}) : align({x}) : entsize({x}) : type({x}) : flags({x})\n", .{
                msec.name(elf_file),
                msec.address(elf_file),
                msec.size,
                msec.alignment.toByteUnits() orelse 0,
                msec.entsize,
                msec.type,
                msec.flags,
            });
            for (msec.subsections.items) |msub| {
                try writer.print("  {f}\n", .{msub.fmt(elf_file)});
            }
        }
    };

    pub const Index = u32;
};
191
/// One deduplicated constant/string owned by a merge `Section`.
pub const Subsection = struct {
    /// Offset within the owning merge section; assigned by `Section.updateSize`.
    value: i64 = 0,
    merge_section_index: Section.Index = 0,
    /// Offset of this subsection's contents in the owning section's `bytes`.
    string_index: u32 = 0,
    /// Length in bytes of the contents.
    size: u32 = 0,
    alignment: Atom.Alignment = .@"1",
    entsize: u32 = 0,
    /// Dead subsections are dropped by `Section.finalize`.
    alive: bool = false,

    /// Virtual address: owning merge section address plus this offset.
    pub fn address(msub: Subsection, elf_file: *Elf) i64 {
        return msub.mergeSection(elf_file).address(elf_file) + msub.value;
    }

    pub fn mergeSection(msub: Subsection, elf_file: *Elf) *Section {
        return elf_file.mergeSection(msub.merge_section_index);
    }

    /// Returns the raw contents; the slice is invalidated if the owning
    /// section's `bytes` grows.
    pub fn getString(msub: Subsection, elf_file: *Elf) []const u8 {
        const msec = msub.mergeSection(elf_file);
        return msec.bytes.items[msub.string_index..][0..msub.size];
    }

    pub fn fmt(msub: Subsection, elf_file: *Elf) std.fmt.Alt(Format, Format.default) {
        return .{ .data = .{
            .msub = msub,
            .elf_file = elf_file,
        } };
    }

    const Format = struct {
        msub: Subsection,
        elf_file: *Elf,

        pub fn default(ctx: Format, writer: *std.Io.Writer) std.Io.Writer.Error!void {
            const msub = ctx.msub;
            const elf_file = ctx.elf_file;
            try writer.print("@{x} : align({x}) : size({x})", .{
                msub.address(elf_file),
                msub.alignment,
                msub.size,
            });
            // Garbage-collected (dead) subsections are tagged in the dump.
            if (!msub.alive) try writer.writeAll(" : [*]");
        }
    };

    pub const Index = u32;
};
239
/// A mergeable input section from one object file, split into strings or
/// fixed-size records that are interned into an output merge `Section`.
pub const InputSection = struct {
    merge_section_index: Section.Index = 0,
    atom_index: Atom.Index = 0,
    /// Start offset of each record within the original input section,
    /// parallel to `subsections`. Assumed sorted ascending.
    offsets: std.ArrayList(u32) = .empty,
    /// Output subsection each record was merged into, parallel to `offsets`.
    subsections: std.ArrayList(Subsection.Index) = .empty,
    /// Local copy of the input section's record contents.
    bytes: std.ArrayList(u8) = .empty,
    /// pos/len of each record within `bytes`, parallel to `offsets`.
    strings: std.ArrayList(String) = .empty,

    pub fn deinit(imsec: *InputSection, allocator: Allocator) void {
        imsec.offsets.deinit(allocator);
        imsec.subsections.deinit(allocator);
        imsec.bytes.deinit(allocator);
        imsec.strings.deinit(allocator);
    }

    /// Frees the raw contents once they are no longer needed, keeping the
    /// offset/subsection bookkeeping intact.
    pub fn clearAndFree(imsec: *InputSection, allocator: Allocator) void {
        imsec.bytes.clearAndFree(allocator);
        // TODO: imsec.strings.clearAndFree(allocator);
    }

    const FindSubsectionResult = struct {
        msub_index: Subsection.Index,
        offset: u32,
    };

    /// Maps an offset within the original input section to the subsection
    /// containing it, plus the remaining offset into that subsection.
    /// Returns null if `offset` falls past the last record.
    pub fn findSubsection(imsec: InputSection, offset: u32) ?FindSubsectionResult {
        // Guard: no records parsed means nothing can match (also avoids
        // the `len - 1` underflow below).
        if (imsec.offsets.items.len == 0) return null;
        // TODO: binary search
        for (imsec.offsets.items, 0..) |off, index| {
            if (offset < off) return .{
                .msub_index = imsec.subsections.items[index - 1],
                .offset = offset - imsec.offsets.items[index - 1],
            };
        }
        // Not before any start offset: check whether it lands inside the
        // final record. NOTE(review): assumes `strings` is parallel to
        // `offsets` (true when populated via `insert`) — confirm callers.
        const last = imsec.offsets.items.len - 1;
        const last_off = imsec.offsets.items[last];
        const last_len = imsec.strings.items[last].len;
        if (offset < last_off + last_len) return .{
            .msub_index = imsec.subsections.items[last],
            .offset = offset - last_off,
        };
        return null;
    }

    /// Records one parsed string/record: appends its contents to `bytes`
    /// and its pos/len to `strings`.
    pub fn insert(imsec: *InputSection, allocator: Allocator, string: []const u8) !void {
        const index: u32 = @intCast(imsec.bytes.items.len);
        try imsec.bytes.appendSlice(allocator, string);
        try imsec.strings.append(allocator, .{ .pos = index, .len = @intCast(string.len) });
    }

    pub const Index = u32;
};
291
/// A (position, length) reference into a byte buffer owned elsewhere.
const String = struct { pos: u32, len: u32 };
293
const assert = std.debug.assert;
const mem = std.mem;
const std = @import("std");

const Allocator = mem.Allocator;
const Atom = @import("Atom.zig");
const Elf = @import("../Elf.zig");
const Merge = @This();