Commit eb5d67b146

Jakub Konka <kubkon@jakubkonka.com>
2024-04-21 10:37:49
Merge pull request #19714 from ziglang/elf-merge-strings
link/elf: implement string merging
1 parent 082e509
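
String merging deduplicates the contents of SHF_MERGE input sections: identical null-terminated strings (or fixed-size records) coming from different object files are stored once in the output, and every symbol and relocation that pointed into an input copy is redirected to the surviving one. The snippet below is a minimal sketch of that idea only; mergeStrings and its test are invented names, and none of this code appears in the linker, which instead uses the MergeSection/MergeSubsection machinery added in src/link/Elf/merge_section.zig further down.

    const std = @import("std");

    // Store each distinct null-terminated string once and report the offset
    // every input string ends up at in the merged buffer.
    fn mergeStrings(gpa: std.mem.Allocator, inputs: []const []const u8, out: *std.ArrayList(u8)) ![]u32 {
        var table = std.StringHashMap(u32).init(gpa);
        defer table.deinit();
        const offsets = try gpa.alloc(u32, inputs.len);
        errdefer gpa.free(offsets);
        for (inputs, offsets) |string, *offset| {
            const gop = try table.getOrPut(string);
            if (!gop.found_existing) {
                gop.value_ptr.* = @intCast(out.items.len);
                try out.appendSlice(string);
                try out.append(0);
            }
            offset.* = gop.value_ptr.*;
        }
        return offsets;
    }

    test "merge duplicate strings" {
        const gpa = std.testing.allocator;
        var out = std.ArrayList(u8).init(gpa);
        defer out.deinit();
        const offsets = try mergeStrings(gpa, &.{ "foo", "bar", "foo" }, &out);
        defer gpa.free(offsets);
        try std.testing.expectEqualSlices(u32, &.{ 0, 4, 0 }, offsets);
        try std.testing.expectEqualStrings("foo\x00bar\x00", out.items);
    }
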
src/link/Elf/Atom.zig
@@ -1,5 +1,5 @@
 /// Address allocated for this Atom.
-value: u64 = 0,
+value: i64 = 0,
 
 /// Name of this Atom.
 name_offset: u32 = 0,
@@ -22,32 +22,19 @@ output_section_index: u32 = 0,
 /// Index of the input section containing this atom's relocs.
 relocs_section_index: u32 = 0,
 
-/// Start index of the relocations belonging to this atom.
-rel_index: u32 = 0,
-
-/// Number of relocations belonging to this atom.
-rel_num: u32 = 0,
-
 /// Index of this atom in the linker's atoms table.
 atom_index: Index = 0,
 
-/// Index of the thunk for this atom.
-thunk_index: Thunk.Index = 0,
-
-/// Flags we use for state tracking.
-flags: Flags = .{},
-
-/// Start index of FDEs referencing this atom.
-fde_start: u32 = 0,
-
-/// End index of FDEs referencing this atom.
-fde_end: u32 = 0,
-
 /// Points to the previous and next neighbors, based on the `text_offset`.
 /// This can be used to find, for example, the capacity of this `TextBlock`.
 prev_index: Index = 0,
 next_index: Index = 0,
 
+/// Flags we use for state tracking.
+flags: Flags = .{},
+
+extra_index: u32 = 0,
+
 pub const Alignment = @import("../../InternPool.zig").Alignment;
 
 pub fn name(self: Atom, elf_file: *Elf) []const u8 {
@@ -57,10 +44,22 @@ pub fn name(self: Atom, elf_file: *Elf) []const u8 {
     };
 }
 
-pub fn address(self: Atom, elf_file: *Elf) u64 {
+pub fn address(self: Atom, elf_file: *Elf) i64 {
     const shndx = self.outputShndx() orelse return self.value;
     const shdr = elf_file.shdrs.items[shndx];
-    return shdr.sh_addr + self.value;
+    return @as(i64, @intCast(shdr.sh_addr)) + self.value;
+}
+
+pub fn debugTombstoneValue(self: Atom, target: Symbol, elf_file: *Elf) ?u64 {
+    if (target.mergeSubsection(elf_file)) |msub| {
+        if (msub.alive) return null;
+    }
+    if (target.atom(elf_file)) |atom_ptr| {
+        if (atom_ptr.flags.alive) return null;
+    }
+    const atom_name = self.name(elf_file);
+    if (!mem.startsWith(u8, atom_name, ".debug")) return null;
+    return if (mem.eql(u8, atom_name, ".debug_loc") or mem.eql(u8, atom_name, ".debug_ranges")) 1 else 0;
 }
 
 pub fn file(self: Atom, elf_file: *Elf) ?File {
@@ -68,7 +67,9 @@ pub fn file(self: Atom, elf_file: *Elf) ?File {
 }
 
 pub fn thunk(self: Atom, elf_file: *Elf) *Thunk {
-    return elf_file.thunk(self.thunk_index);
+    assert(self.flags.thunk);
+    const extras = self.extra(elf_file).?;
+    return elf_file.thunk(extras.thunk);
 }
 
 pub fn inputShdr(self: Atom, elf_file: *Elf) elf.Elf64_Shdr {
@@ -102,13 +103,13 @@ pub fn capacity(self: Atom, elf_file: *Elf) u64 {
         next.address(elf_file)
     else
         std.math.maxInt(u32);
-    return next_addr - self.address(elf_file);
+    return @intCast(next_addr - self.address(elf_file));
 }
 
 pub fn freeListEligible(self: Atom, elf_file: *Elf) bool {
     // No need to keep a free list node for the last block.
     const next = elf_file.atom(self.next_index) orelse return false;
-    const cap = next.address(elf_file) - self.address(elf_file);
+    const cap: u64 = @intCast(next.address(elf_file) - self.address(elf_file));
     const ideal_cap = Elf.padToIdeal(self.size);
     if (cap <= ideal_cap) return false;
     const surplus = cap - ideal_cap;
@@ -141,8 +142,8 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
             // Is it enough that we could fit this new atom?
             const cap = big_atom.capacity(elf_file);
             const ideal_capacity = Elf.padToIdeal(cap);
-            const ideal_capacity_end_vaddr = std.math.add(u64, big_atom.value, ideal_capacity) catch ideal_capacity;
-            const capacity_end_vaddr = big_atom.value + cap;
+            const ideal_capacity_end_vaddr = std.math.add(u64, @intCast(big_atom.value), ideal_capacity) catch ideal_capacity;
+            const capacity_end_vaddr = @as(u64, @intCast(big_atom.value)) + cap;
             const new_start_vaddr_unaligned = capacity_end_vaddr - new_atom_ideal_capacity;
             const new_start_vaddr = self.alignment.backward(new_start_vaddr_unaligned);
             if (new_start_vaddr < ideal_capacity_end_vaddr) {
@@ -167,14 +168,14 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
             if (!keep_free_list_node) {
                 free_list_removal = i;
             }
-            break :blk new_start_vaddr;
+            break :blk @intCast(new_start_vaddr);
         } else if (elf_file.atom(last_atom_index.*)) |last| {
             const ideal_capacity = Elf.padToIdeal(last.size);
-            const ideal_capacity_end_vaddr = last.value + ideal_capacity;
+            const ideal_capacity_end_vaddr = @as(u64, @intCast(last.value)) + ideal_capacity;
             const new_start_vaddr = self.alignment.forward(ideal_capacity_end_vaddr);
             // Set up the metadata to be updated, after errors are no longer possible.
             atom_placement = last.atom_index;
-            break :blk new_start_vaddr;
+            break :blk @intCast(new_start_vaddr);
         } else {
             break :blk 0;
         }
@@ -184,7 +185,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
         self.atom_index,
         self.name(elf_file),
         self.address(elf_file),
-        self.address(elf_file) + self.size,
+        self.address(elf_file) + @as(i64, @intCast(self.size)),
     });
 
     const expand_section = if (atom_placement) |placement_index|
@@ -192,7 +193,7 @@ pub fn allocate(self: *Atom, elf_file: *Elf) !void {
     else
         true;
     if (expand_section) {
-        const needed_size = self.value + self.size;
+        const needed_size: u64 = @intCast(self.value + @as(i64, @intCast(self.size)));
         try elf_file.growAllocSection(self.outputShndx().?, needed_size);
         last_atom_index.* = self.atom_index;
 
@@ -242,7 +243,7 @@ pub fn shrink(self: *Atom, elf_file: *Elf) void {
 }
 
 pub fn grow(self: *Atom, elf_file: *Elf) !void {
-    if (!self.alignment.check(self.value) or self.size > self.capacity(elf_file))
+    if (!self.alignment.check(@intCast(self.value)) or self.size > self.capacity(elf_file))
         try self.allocate(elf_file);
 }
 
@@ -309,11 +310,14 @@ pub fn free(self: *Atom, elf_file: *Elf) void {
 
 pub fn relocs(self: Atom, elf_file: *Elf) []const elf.Elf64_Rela {
     const shndx = self.relocsShndx() orelse return &[0]elf.Elf64_Rela{};
-    return switch (self.file(elf_file).?) {
-        .zig_object => |x| x.relocs.items[shndx].items,
-        .object => |x| x.relocs.items[self.rel_index..][0..self.rel_num],
+    switch (self.file(elf_file).?) {
+        .zig_object => |x| return x.relocs.items[shndx].items,
+        .object => |x| {
+            const extras = self.extra(elf_file).?;
+            return x.relocs.items[extras.rel_index..][0..extras.rel_count];
+        },
         else => unreachable,
-    };
+    }
 }
 
 pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.Elf64_Rela)) !void {
@@ -329,11 +333,14 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
         };
         const target = elf_file.symbol(target_index);
         const r_type = rel.r_type();
-        const r_offset = self.value + rel.r_offset;
+        const r_offset: u64 = @intCast(self.value + @as(i64, @intCast(rel.r_offset)));
         var r_addend = rel.r_addend;
         var r_sym: u32 = 0;
         switch (target.type(elf_file)) {
-            elf.STT_SECTION => {
+            elf.STT_SECTION => if (target.mergeSubsection(elf_file)) |msub| {
+                r_addend += @intCast(target.address(.{}, elf_file));
+                r_sym = elf_file.sectionSymbolOutputSymtabIndex(msub.mergeSection(elf_file).output_section_index);
+            } else {
                 r_addend += @intCast(target.address(.{}, elf_file));
                 r_sym = elf_file.sectionSymbolOutputSymtabIndex(target.outputShndx().?);
             },
@@ -359,9 +366,10 @@ pub fn writeRelocs(self: Atom, elf_file: *Elf, out_relocs: *std.ArrayList(elf.El
 }
 
 pub fn fdes(self: Atom, elf_file: *Elf) []Fde {
-    if (self.fde_start == self.fde_end) return &[0]Fde{};
+    if (!self.flags.fde) return &[0]Fde{};
+    const extras = self.extra(elf_file).?;
     const object = self.file(elf_file).?.object;
-    return object.fdes.items[self.fde_start..self.fde_end];
+    return object.fdes.items[extras.fde_start..][0..extras.fde_count];
 }
 
 pub fn markFdesDead(self: Atom, elf_file: *Elf) void {
@@ -419,6 +427,12 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
         };
         const symbol = elf_file.symbol(symbol_index);
 
+        const is_synthetic_symbol = switch (file_ptr) {
+            .zig_object => false, // TODO: implement this once we support merge sections in ZigObject
+            .object => |x| rel.r_sym() >= x.symtab.items.len,
+            else => unreachable,
+        };
+
         // Check for violation of One Definition Rule for COMDATs.
         if (symbol.file(elf_file) == null) {
             // TODO convert into an error
@@ -431,7 +445,8 @@ pub fn scanRelocs(self: Atom, elf_file: *Elf, code: ?[]const u8, undefs: anytype
         }
 
         // Report an undefined symbol.
-        if (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)) continue;
+        if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, symbol, symbol_index, rel, undefs)))
+            continue;
 
         if (symbol.isIFunc(elf_file)) {
             symbol.flags.needs_got = true;
@@ -743,21 +758,21 @@ pub fn resolveRelocsAlloc(self: Atom, elf_file: *Elf, code: []u8) RelocError!voi
         // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
         //
         // Address of the source atom.
-        const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
+        const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
         // Addend from the relocation.
         const A = rel.r_addend;
         // Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
-        const S = @as(i64, @intCast(target.address(.{}, elf_file)));
+        const S = target.address(.{}, elf_file);
         // Address of the global offset table.
-        const GOT = @as(i64, @intCast(elf_file.gotAddress()));
+        const GOT = elf_file.gotAddress();
         // Address of the .zig.got table entry if any.
-        const ZIG_GOT = @as(i64, @intCast(target.zigGotAddress(elf_file)));
+        const ZIG_GOT = target.zigGotAddress(elf_file);
         // Relative offset to the start of the global offset table.
-        const G = @as(i64, @intCast(target.gotAddress(elf_file))) - GOT;
+        const G = target.gotAddress(elf_file) - GOT;
         // Address of the thread pointer.
-        const TP = @as(i64, @intCast(elf_file.tpAddress()));
+        const TP = elf_file.tpAddress();
         // Address of the dynamic thread pointer.
-        const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
+        const DTP = elf_file.dtpAddress();
 
         relocs_log.debug("  {s}: {x}: [{x} => {x}] G({x}) ZG({x}) ({s})", .{
             relocation.fmtRelocType(rel.r_type(), cpu_arch),
@@ -814,9 +829,9 @@ fn resolveDynAbsReloc(
     const comp = elf_file.base.comp;
     const gpa = comp.gpa;
     const cpu_arch = elf_file.getTarget().cpu.arch;
-    const P = self.address(elf_file) + rel.r_offset;
+    const P: u64 = @intCast(self.address(elf_file) + @as(i64, @intCast(rel.r_offset)));
     const A = rel.r_addend;
-    const S = @as(i64, @intCast(target.address(.{}, elf_file)));
+    const S = target.address(.{}, elf_file);
     const is_writeable = self.inputShdr(elf_file).sh_flags & elf.SHF_WRITE != 0;
 
     const num_dynrelocs = switch (self.file(elf_file).?) {
@@ -884,7 +899,7 @@ fn resolveDynAbsReloc(
         },
 
         .ifunc => {
-            const S_ = @as(i64, @intCast(target.address(.{ .plt = false }, elf_file)));
+            const S_ = target.address(.{ .plt = false }, elf_file);
             elf_file.addRelaDynAssumeCapacity(.{
                 .offset = P,
                 .type = relocation.encode(.irel, cpu_arch),
@@ -924,6 +939,11 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
             else => unreachable,
         };
         const target = elf_file.symbol(target_index);
+        const is_synthetic_symbol = switch (file_ptr) {
+            .zig_object => false, // TODO: implement this once we support merge sections in ZigObject
+            .object => |x| rel.r_sym() >= x.symtab.items.len,
+            else => unreachable,
+        };
 
         // Check for violation of One Definition Rule for COMDATs.
         if (target.file(elf_file) == null) {
@@ -937,20 +957,21 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
         }
 
         // Report an undefined symbol.
-        if (try self.reportUndefined(elf_file, target, target_index, rel, undefs)) continue;
+        if (!is_synthetic_symbol and (try self.reportUndefined(elf_file, target, target_index, rel, undefs)))
+            continue;
 
         // We will use equation format to resolve relocations:
         // https://intezer.com/blog/malware-analysis/executable-and-linkable-format-101-part-3-relocations/
         //
-        const P = @as(i64, @intCast(self.address(elf_file) + rel.r_offset));
+        const P = self.address(elf_file) + @as(i64, @intCast(rel.r_offset));
         // Addend from the relocation.
         const A = rel.r_addend;
         // Address of the target symbol - can be address of the symbol within an atom or address of PLT stub.
-        const S = @as(i64, @intCast(target.address(.{}, elf_file)));
+        const S = target.address(.{}, elf_file);
         // Address of the global offset table.
-        const GOT = @as(i64, @intCast(elf_file.gotAddress()));
+        const GOT = elf_file.gotAddress();
         // Address of the dynamic thread pointer.
-        const DTP = @as(i64, @intCast(elf_file.dtpAddress()));
+        const DTP = elf_file.dtpAddress();
 
         const args = ResolveArgs{ P, A, S, GOT, 0, 0, DTP, 0 };
 
@@ -984,6 +1005,35 @@ pub fn resolveRelocsNonAlloc(self: Atom, elf_file: *Elf, code: []u8, undefs: any
     if (has_reloc_errors) return error.RelocFailure;
 }
 
+const AddExtraOpts = struct {
+    thunk: ?u32 = null,
+    fde_start: ?u32 = null,
+    fde_count: ?u32 = null,
+    rel_index: ?u32 = null,
+    rel_count: ?u32 = null,
+};
+
+pub fn addExtra(atom: *Atom, opts: AddExtraOpts, elf_file: *Elf) !void {
+    if (atom.extra(elf_file) == null) {
+        atom.extra_index = try elf_file.addAtomExtra(.{});
+    }
+    var extras = atom.extra(elf_file).?;
+    inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
+        if (@field(opts, field.name)) |x| {
+            @field(extras, field.name) = x;
+        }
+    }
+    atom.setExtra(extras, elf_file);
+}
+
+pub inline fn extra(atom: Atom, elf_file: *Elf) ?Extra {
+    return elf_file.atomExtra(atom.extra_index);
+}
+
+pub inline fn setExtra(atom: Atom, extras: Extra, elf_file: *Elf) void {
+    elf_file.setAtomExtra(atom.extra_index, extras);
+}
+
 pub fn format(
     atom: Atom,
     comptime unused_fmt_string: []const u8,
@@ -1023,12 +1073,13 @@ fn format2(
         atom.atom_index,           atom.name(elf_file), atom.address(elf_file),
         atom.output_section_index, atom.alignment,      atom.size,
     });
-    if (atom.fde_start != atom.fde_end) {
+    if (atom.flags.fde) {
         try writer.writeAll(" : fdes{ ");
-        for (atom.fdes(elf_file), atom.fde_start..) |fde, i| {
+        const extras = atom.extra(elf_file).?;
+        for (atom.fdes(elf_file), extras.fde_start..) |fde, i| {
             try writer.print("{d}", .{i});
             if (!fde.alive) try writer.writeAll("([*])");
-            if (i < atom.fde_end - 1) try writer.writeAll(", ");
+            if (i - extras.fde_start < extras.fde_count - 1) try writer.writeAll(", ");
         }
         try writer.writeAll(" }");
     }
@@ -1045,6 +1096,12 @@ pub const Flags = packed struct {
 
     /// Specifies if the atom has been visited during garbage collection.
     visited: bool = false,
+
+    /// Whether this atom has a range extension thunk.
+    thunk: bool = false,
+
+    /// Whether this atom has FDE records.
+    fde: bool = false,
 };
 
 const x86_64 = struct {
@@ -1235,10 +1292,10 @@ const x86_64 = struct {
 
             .TLSGD => {
                 if (target.flags.has_tlsgd) {
-                    const S_ = @as(i64, @intCast(target.tlsGdAddress(elf_file)));
+                    const S_ = target.tlsGdAddress(elf_file);
                     try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
                 } else if (target.flags.has_gottp) {
-                    const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
+                    const S_ = target.gotTpAddress(elf_file);
                     try x86_64.relaxTlsGdToIe(atom, &.{ rel, it.next().? }, @intCast(S_ - P), elf_file, stream);
                 } else {
                     try x86_64.relaxTlsGdToLe(
@@ -1254,13 +1311,13 @@ const x86_64 = struct {
             .TLSLD => {
                 if (elf_file.got.tlsld_index) |entry_index| {
                     const tlsld_entry = elf_file.got.entries.items[entry_index];
-                    const S_ = @as(i64, @intCast(tlsld_entry.address(elf_file)));
+                    const S_ = tlsld_entry.address(elf_file);
                     try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
                 } else {
                     try x86_64.relaxTlsLdToLe(
                         atom,
                         &.{ rel, it.next().? },
-                        @as(i32, @intCast(TP - @as(i64, @intCast(elf_file.tlsAddress())))),
+                        @as(i32, @intCast(TP - elf_file.tlsAddress())),
                         elf_file,
                         stream,
                     );
@@ -1269,7 +1326,7 @@ const x86_64 = struct {
 
             .GOTPC32_TLSDESC => {
                 if (target.flags.has_tlsdesc) {
-                    const S_ = @as(i64, @intCast(target.tlsDescAddress(elf_file)));
+                    const S_ = target.tlsDescAddress(elf_file);
                     try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
                 } else {
                     x86_64.relaxGotPcTlsDesc(code[r_offset - 3 ..]) catch {
@@ -1293,7 +1350,7 @@ const x86_64 = struct {
 
             .GOTTPOFF => {
                 if (target.flags.has_gottp) {
-                    const S_ = @as(i64, @intCast(target.gotTpAddress(elf_file)));
+                    const S_ = target.gotTpAddress(elf_file);
                     try cwriter.writeInt(i32, @as(i32, @intCast(S_ + A - P)), .little);
                 } else {
                     x86_64.relaxGotTpOff(code[r_offset - 3 ..]);
@@ -1336,9 +1393,18 @@ const x86_64 = struct {
             .@"16" => try cwriter.writeInt(u16, @as(u16, @bitCast(@as(i16, @intCast(S + A)))), .little),
             .@"32" => try cwriter.writeInt(u32, @as(u32, @bitCast(@as(i32, @intCast(S + A)))), .little),
             .@"32S" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
-            .@"64" => try cwriter.writeInt(i64, S + A, .little),
-            .DTPOFF32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
-            .DTPOFF64 => try cwriter.writeInt(i64, S + A - DTP, .little),
+            .@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
+                try cwriter.writeInt(u64, value, .little)
+            else
+                try cwriter.writeInt(i64, S + A, .little),
+            .DTPOFF32 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
+                try cwriter.writeInt(u64, value, .little)
+            else
+                try cwriter.writeInt(i32, @as(i32, @intCast(S + A - DTP)), .little),
+            .DTPOFF64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
+                try cwriter.writeInt(u64, value, .little)
+            else
+                try cwriter.writeInt(i64, S + A - DTP, .little),
             .GOTOFF64 => try cwriter.writeInt(i64, S + A - GOT, .little),
             .GOTPC64 => try cwriter.writeInt(i64, GOT + A, .little),
             .SIZE32 => {
@@ -1720,7 +1786,7 @@ const aarch64 = struct {
                         .object => |x| x.symbols.items[rel.r_sym()],
                         else => unreachable,
                     };
-                    const S_: i64 = @intCast(th.targetAddress(target_index, elf_file));
+                    const S_ = th.targetAddress(target_index, elf_file);
                     break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
                 };
                 aarch64_util.writeBranchImm(disp, code);
@@ -1738,16 +1804,12 @@ const aarch64 = struct {
 
             .ADR_PREL_PG_HI21 => {
                 // TODO: check for relaxation of ADRP+ADD
-                const saddr = @as(u64, @intCast(P));
-                const taddr = @as(u64, @intCast(S + A));
-                const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
+                const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, S + A)));
                 aarch64_util.writeAdrpInst(pages, code);
             },
 
             .ADR_GOT_PAGE => if (target.flags.has_got) {
-                const saddr = @as(u64, @intCast(P));
-                const taddr = @as(u64, @intCast(G + GOT + A));
-                const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr)));
+                const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, G + GOT + A)));
                 aarch64_util.writeAdrpInst(pages, code);
             } else {
                 // TODO: relax
@@ -1802,46 +1864,38 @@ const aarch64 = struct {
             },
 
             .TLSIE_ADR_GOTTPREL_PAGE21 => {
-                const S_: i64 = @intCast(target.gotTpAddress(elf_file));
-                const saddr: u64 = @intCast(P);
-                const taddr: u64 = @intCast(S_ + A);
-                relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
+                const S_ = target.gotTpAddress(elf_file);
+                relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
                 aarch64_util.writeAdrpInst(pages, code);
             },
 
             .TLSIE_LD64_GOTTPREL_LO12_NC => {
-                const S_: i64 = @intCast(target.gotTpAddress(elf_file));
-                const taddr: u64 = @intCast(S_ + A);
-                relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                const offset: u12 = try math.divExact(u12, @truncate(taddr), 8);
+                const S_ = target.gotTpAddress(elf_file);
+                relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
                 aarch64_util.writeLoadStoreRegInst(offset, code);
             },
 
             .TLSGD_ADR_PAGE21 => {
-                const S_: i64 = @intCast(target.tlsGdAddress(elf_file));
-                const saddr: u64 = @intCast(P);
-                const taddr: u64 = @intCast(S_ + A);
-                relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
+                const S_ = target.tlsGdAddress(elf_file);
+                relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
                 aarch64_util.writeAdrpInst(pages, code);
             },
 
             .TLSGD_ADD_LO12_NC => {
-                const S_: i64 = @intCast(target.tlsGdAddress(elf_file));
-                const taddr: u64 = @intCast(S_ + A);
-                relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                const offset: u12 = @truncate(taddr);
+                const S_ = target.tlsGdAddress(elf_file);
+                relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
                 aarch64_util.writeAddImmInst(offset, code);
             },
 
             .TLSDESC_ADR_PAGE21 => {
                 if (target.flags.has_tlsdesc) {
-                    const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
-                    const saddr: u64 = @intCast(P);
-                    const taddr: u64 = @intCast(S_ + A);
-                    relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                    const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(saddr, taddr));
+                    const S_ = target.tlsDescAddress(elf_file);
+                    relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                    const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
                     aarch64_util.writeAdrpInst(pages, code);
                 } else {
                     relocs_log.debug("      relaxing adrp => nop", .{});
@@ -1851,10 +1905,9 @@ const aarch64 = struct {
 
             .TLSDESC_LD64_LO12 => {
                 if (target.flags.has_tlsdesc) {
-                    const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
-                    const taddr: u64 = @intCast(S_ + A);
-                    relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                    const offset: u12 = try math.divExact(u12, @truncate(taddr), 8);
+                    const S_ = target.tlsDescAddress(elf_file);
+                    relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                    const offset: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
                     aarch64_util.writeLoadStoreRegInst(offset, code);
                 } else {
                     relocs_log.debug("      relaxing ldr => nop", .{});
@@ -1864,10 +1917,9 @@ const aarch64 = struct {
 
             .TLSDESC_ADD_LO12 => {
                 if (target.flags.has_tlsdesc) {
-                    const S_: i64 = @intCast(target.tlsDescAddress(elf_file));
-                    const taddr: u64 = @intCast(S_ + A);
-                    relocs_log.debug("      [{x} => {x}]", .{ P, taddr });
-                    const offset: u12 = @truncate(taddr);
+                    const S_ = target.tlsDescAddress(elf_file);
+                    relocs_log.debug("      [{x} => {x}]", .{ P, S_ + A });
+                    const offset: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
                     aarch64_util.writeAddImmInst(offset, code);
                 } else {
                     const old_inst = Instruction{
@@ -1912,7 +1964,6 @@ const aarch64 = struct {
     ) !void {
         _ = it;
         _ = code;
-        _ = target;
 
         const r_type: elf.R_AARCH64 = @enumFromInt(rel.r_type());
         const cwriter = stream.writer();
@@ -1922,7 +1973,10 @@ const aarch64 = struct {
         switch (r_type) {
             .NONE => unreachable,
             .ABS32 => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
-            .ABS64 => try cwriter.writeInt(i64, S + A, .little),
+            .ABS64 => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
+                try cwriter.writeInt(u64, value, .little)
+            else
+                try cwriter.writeInt(i64, S + A, .little),
             else => try atom.reportUnhandledRelocError(rel, elf_file),
         }
     }
@@ -2047,7 +2101,7 @@ const riscv = struct {
                 const atom_addr = atom.address(elf_file);
                 const pos = it.pos;
                 const pair = while (it.prev()) |pair| {
-                    if (S == atom_addr + pair.r_offset) break pair;
+                    if (S == atom_addr + @as(i64, @intCast(pair.r_offset))) break pair;
                 } else {
                     // TODO: implement searching forward
                     var err = try elf_file.addErrorWithNotes(1);
@@ -2065,10 +2119,10 @@ const riscv = struct {
                     .object => |x| elf_file.symbol(x.symbols.items[pair.r_sym()]),
                     else => unreachable,
                 };
-                const S_ = @as(i64, @intCast(target_.address(.{}, elf_file)));
+                const S_ = target_.address(.{}, elf_file);
                 const A_ = pair.r_addend;
-                const P_ = @as(i64, @intCast(atom_addr + pair.r_offset));
-                const G_ = @as(i64, @intCast(target_.gotAddress(elf_file))) - GOT;
+                const P_ = atom_addr + @as(i64, @intCast(pair.r_offset));
+                const G_ = target_.gotAddress(elf_file) - GOT;
                 const disp = switch (@as(elf.R_RISCV, @enumFromInt(pair.r_type()))) {
                     .PCREL_HI20 => math.cast(i32, S_ + A_ - P_) orelse return error.Overflow,
                     .GOT_HI20 => math.cast(i32, G_ + GOT + A_ - P_) orelse return error.Overflow,
@@ -2096,7 +2150,6 @@ const riscv = struct {
         code: []u8,
         stream: anytype,
     ) !void {
-        _ = target;
         _ = it;
 
         const r_type: elf.R_RISCV = @enumFromInt(rel.r_type());
@@ -2111,7 +2164,10 @@ const riscv = struct {
             .NONE => unreachable,
 
             .@"32" => try cwriter.writeInt(i32, @as(i32, @intCast(S + A)), .little),
-            .@"64" => try cwriter.writeInt(i64, S + A, .little),
+            .@"64" => if (atom.debugTombstoneValue(target.*, elf_file)) |value|
+                try cwriter.writeInt(u64, value, .little)
+            else
+                try cwriter.writeInt(i64, S + A, .little),
 
             .ADD8 => riscv_util.writeAddend(i8, .add, code[r_offset..][0..1], S + A),
             .SUB8 => riscv_util.writeAddend(i8, .sub, code[r_offset..][0..1], S + A),
@@ -2170,6 +2226,23 @@ const RelocsIterator = struct {
     }
 };
 
+pub const Extra = struct {
+    /// Index of the range extension thunk of this atom.
+    thunk: u32 = 0,
+
+    /// Start index of FDEs referencing this atom.
+    fde_start: u32 = 0,
+
+    /// Count of FDEs referencing this atom.
+    fde_count: u32 = 0,
+
+    /// Start index of relocations belonging to this atom.
+    rel_index: u32 = 0,
+
+    /// Count of relocations belonging to this atom.
+    rel_count: u32 = 0,
+};
+
 const std = @import("std");
 const assert = std.debug.assert;
 const elf = std.elf;
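
A large share of the Atom.zig changes above is the move from u64 to i64 for value, address() and the relocation operands (P, A, S, GOT, TP, DTP and friends), with the unsigned casts pushed to the edges. The signed representation is the natural one because PC-relative fields such as S + A - P are negative whenever the target precedes the patch site; only the final field width gets narrowed. A self-contained check with made-up addresses:

    const std = @import("std");

    test "pc-relative field is computed signed" {
        const S: i64 = 0x201000; // address of the target symbol
        const A: i64 = -4; // addend from the relocation
        const P: i64 = 0x202010; // address of the place being patched
        const disp: i32 = @intCast(S + A - P);
        try std.testing.expectEqual(@as(i32, -0x1014), disp);
    }
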
src/link/Elf/gc.zig
@@ -68,6 +68,10 @@ fn collectRoots(roots: *std.ArrayList(*Atom), files: []const File.Index, elf_fil
 }
 
 fn markSymbol(sym: *Symbol, roots: *std.ArrayList(*Atom), elf_file: *Elf) !void {
+    if (sym.mergeSubsection(elf_file)) |msub| {
+        msub.alive = true;
+        return;
+    }
     const atom = sym.atom(elf_file) orelse return;
     if (markAtom(atom)) try roots.append(atom);
 }
@@ -96,6 +100,10 @@ fn markLive(atom: *Atom, elf_file: *Elf) void {
 
     for (atom.relocs(elf_file)) |rel| {
         const target_sym = elf_file.symbol(file.symbol(rel.r_sym()));
+        if (target_sym.mergeSubsection(elf_file)) |msub| {
+            msub.alive = true;
+            continue;
+        }
         const target_atom = target_sym.atom(elf_file) orelse continue;
         target_atom.flags.alive = true;
         gc_track_live_log.debug("{}marking live atom({d})", .{ track_live_level, target_atom.atom_index });
src/link/Elf/LinkerDefined.zig
@@ -60,10 +60,10 @@ pub fn updateSymtabSize(self: *LinkerDefined, elf_file: *Elf) !void {
         if (file_ptr.index() != self.index) continue;
         global.flags.output_symtab = true;
         if (global.isLocal(elf_file)) {
-            try global.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
+            try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
             self.output_symtab_ctx.nlocals += 1;
         } else {
-            try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
+            try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
             self.output_symtab_ctx.nglobals += 1;
         }
         self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
src/link/Elf/merge_section.zig
@@ -0,0 +1,285 @@
+pub const MergeSection = struct {
+    name_offset: u32 = 0,
+    type: u32 = 0,
+    flags: u64 = 0,
+    output_section_index: u32 = 0,
+    bytes: std.ArrayListUnmanaged(u8) = .{},
+    table: std.HashMapUnmanaged(
+        String,
+        MergeSubsection.Index,
+        IndexContext,
+        std.hash_map.default_max_load_percentage,
+    ) = .{},
+    subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .{},
+
+    pub fn deinit(msec: *MergeSection, allocator: Allocator) void {
+        msec.bytes.deinit(allocator);
+        msec.table.deinit(allocator);
+        msec.subsections.deinit(allocator);
+    }
+
+    pub fn name(msec: MergeSection, elf_file: *Elf) [:0]const u8 {
+        return elf_file.strings.getAssumeExists(msec.name_offset);
+    }
+
+    pub fn address(msec: MergeSection, elf_file: *Elf) i64 {
+        const shdr = elf_file.shdrs.items[msec.output_section_index];
+        return @intCast(shdr.sh_addr);
+    }
+
+    const InsertResult = struct {
+        found_existing: bool,
+        key: String,
+        sub: *MergeSubsection.Index,
+    };
+
+    pub fn insert(msec: *MergeSection, allocator: Allocator, string: []const u8) !InsertResult {
+        const gop = try msec.table.getOrPutContextAdapted(
+            allocator,
+            string,
+            IndexAdapter{ .bytes = msec.bytes.items },
+            IndexContext{ .bytes = msec.bytes.items },
+        );
+        if (!gop.found_existing) {
+            const index: u32 = @intCast(msec.bytes.items.len);
+            try msec.bytes.appendSlice(allocator, string);
+            gop.key_ptr.* = .{ .pos = index, .len = @intCast(string.len) };
+        }
+        return .{ .found_existing = gop.found_existing, .key = gop.key_ptr.*, .sub = gop.value_ptr };
+    }
+
+    pub fn insertZ(msec: *MergeSection, allocator: Allocator, string: []const u8) !InsertResult {
+        const with_null = try allocator.alloc(u8, string.len + 1);
+        defer allocator.free(with_null);
+        @memcpy(with_null[0..string.len], string);
+        with_null[string.len] = 0;
+        return msec.insert(allocator, with_null);
+    }
+
+    /// Finalizes the merge section and clears hash table.
+    /// Sorts all owned subsections.
+    pub fn finalize(msec: *MergeSection, elf_file: *Elf) !void {
+        const gpa = elf_file.base.comp.gpa;
+        try msec.subsections.ensureTotalCapacityPrecise(gpa, msec.table.count());
+
+        var it = msec.table.iterator();
+        while (it.next()) |entry| {
+            const msub = elf_file.mergeSubsection(entry.value_ptr.*);
+            if (!msub.alive) continue;
+            msec.subsections.appendAssumeCapacity(entry.value_ptr.*);
+        }
+        msec.table.clearAndFree(gpa);
+
+        const sortFn = struct {
+            pub fn sortFn(ctx: *Elf, lhs: MergeSubsection.Index, rhs: MergeSubsection.Index) bool {
+                const lhs_msub = ctx.mergeSubsection(lhs);
+                const rhs_msub = ctx.mergeSubsection(rhs);
+                if (lhs_msub.alignment.compareStrict(.eq, rhs_msub.alignment)) {
+                    if (lhs_msub.size == rhs_msub.size) {
+                        return mem.order(u8, lhs_msub.getString(ctx), rhs_msub.getString(ctx)) == .lt;
+                    }
+                    return lhs_msub.size < rhs_msub.size;
+                }
+                return lhs_msub.alignment.compareStrict(.lt, rhs_msub.alignment);
+            }
+        }.sortFn;
+
+        std.mem.sort(MergeSubsection.Index, msec.subsections.items, elf_file, sortFn);
+    }
+
+    pub const IndexContext = struct {
+        bytes: []const u8,
+
+        pub fn eql(_: @This(), a: String, b: String) bool {
+            return a.pos == b.pos;
+        }
+
+        pub fn hash(ctx: @This(), key: String) u64 {
+            const str = ctx.bytes[key.pos..][0..key.len];
+            return std.hash_map.hashString(str);
+        }
+    };
+
+    pub const IndexAdapter = struct {
+        bytes: []const u8,
+
+        pub fn eql(ctx: @This(), a: []const u8, b: String) bool {
+            const str = ctx.bytes[b.pos..][0..b.len];
+            return mem.eql(u8, a, str);
+        }
+
+        pub fn hash(_: @This(), adapted_key: []const u8) u64 {
+            return std.hash_map.hashString(adapted_key);
+        }
+    };
+
+    pub fn format(
+        msec: MergeSection,
+        comptime unused_fmt_string: []const u8,
+        options: std.fmt.FormatOptions,
+        writer: anytype,
+    ) !void {
+        _ = msec;
+        _ = unused_fmt_string;
+        _ = options;
+        _ = writer;
+        @compileError("do not format MergeSection directly");
+    }
+
+    pub fn fmt(msec: MergeSection, elf_file: *Elf) std.fmt.Formatter(format2) {
+        return .{ .data = .{
+            .msec = msec,
+            .elf_file = elf_file,
+        } };
+    }
+
+    const FormatContext = struct {
+        msec: MergeSection,
+        elf_file: *Elf,
+    };
+
+    pub fn format2(
+        ctx: FormatContext,
+        comptime unused_fmt_string: []const u8,
+        options: std.fmt.FormatOptions,
+        writer: anytype,
+    ) !void {
+        _ = options;
+        _ = unused_fmt_string;
+        const msec = ctx.msec;
+        const elf_file = ctx.elf_file;
+        try writer.print("{s} : @{x} : type({x}) : flags({x})\n", .{
+            msec.name(elf_file),
+            msec.address(elf_file),
+            msec.type,
+            msec.flags,
+        });
+        for (msec.subsections.items) |index| {
+            try writer.print("   {}\n", .{elf_file.mergeSubsection(index).fmt(elf_file)});
+        }
+    }
+
+    pub const Index = u32;
+};
+
+pub const MergeSubsection = struct {
+    value: i64 = 0,
+    merge_section_index: MergeSection.Index = 0,
+    string_index: u32 = 0,
+    size: u32 = 0,
+    alignment: Atom.Alignment = .@"1",
+    entsize: u32 = 0,
+    alive: bool = false,
+
+    pub fn address(msub: MergeSubsection, elf_file: *Elf) i64 {
+        return msub.mergeSection(elf_file).address(elf_file) + msub.value;
+    }
+
+    pub fn mergeSection(msub: MergeSubsection, elf_file: *Elf) *MergeSection {
+        return elf_file.mergeSection(msub.merge_section_index);
+    }
+
+    pub fn getString(msub: MergeSubsection, elf_file: *Elf) []const u8 {
+        const msec = msub.mergeSection(elf_file);
+        return msec.bytes.items[msub.string_index..][0..msub.size];
+    }
+
+    pub fn format(
+        msub: MergeSubsection,
+        comptime unused_fmt_string: []const u8,
+        options: std.fmt.FormatOptions,
+        writer: anytype,
+    ) !void {
+        _ = msub;
+        _ = unused_fmt_string;
+        _ = options;
+        _ = writer;
+        @compileError("do not format MergeSubsection directly");
+    }
+
+    pub fn fmt(msub: MergeSubsection, elf_file: *Elf) std.fmt.Formatter(format2) {
+        return .{ .data = .{
+            .msub = msub,
+            .elf_file = elf_file,
+        } };
+    }
+
+    const FormatContext = struct {
+        msub: MergeSubsection,
+        elf_file: *Elf,
+    };
+
+    pub fn format2(
+        ctx: FormatContext,
+        comptime unused_fmt_string: []const u8,
+        options: std.fmt.FormatOptions,
+        writer: anytype,
+    ) !void {
+        _ = options;
+        _ = unused_fmt_string;
+        const msub = ctx.msub;
+        const elf_file = ctx.elf_file;
+        try writer.print("@{x} : align({x}) : size({x})", .{
+            msub.address(elf_file),
+            msub.alignment,
+            msub.size,
+        });
+        if (!msub.alive) try writer.writeAll(" : [*]");
+    }
+
+    pub const Index = u32;
+};
+
+pub const InputMergeSection = struct {
+    merge_section_index: MergeSection.Index = 0,
+    atom_index: Atom.Index = 0,
+    offsets: std.ArrayListUnmanaged(u32) = .{},
+    subsections: std.ArrayListUnmanaged(MergeSubsection.Index) = .{},
+    bytes: std.ArrayListUnmanaged(u8) = .{},
+    strings: std.ArrayListUnmanaged(String) = .{},
+
+    pub fn deinit(imsec: *InputMergeSection, allocator: Allocator) void {
+        imsec.offsets.deinit(allocator);
+        imsec.subsections.deinit(allocator);
+        imsec.bytes.deinit(allocator);
+        imsec.strings.deinit(allocator);
+    }
+
+    pub fn clearAndFree(imsec: *InputMergeSection, allocator: Allocator) void {
+        imsec.bytes.clearAndFree(allocator);
+        // TODO: imsec.strings.clearAndFree(allocator);
+    }
+
+    pub fn findSubsection(imsec: InputMergeSection, offset: u32) ?struct { MergeSubsection.Index, u32 } {
+        // TODO: binary search
+        for (imsec.offsets.items, 0..) |off, index| {
+            if (offset < off) return .{
+                imsec.subsections.items[index - 1],
+                offset - imsec.offsets.items[index - 1],
+            };
+        }
+        const last = imsec.offsets.items.len - 1;
+        const last_off = imsec.offsets.items[last];
+        const last_len = imsec.strings.items[last].len;
+        if (offset < last_off + last_len) return .{ imsec.subsections.items[last], offset - last_off };
+        return null;
+    }
+
+    pub fn insert(imsec: *InputMergeSection, allocator: Allocator, string: []const u8) !void {
+        const index: u32 = @intCast(imsec.bytes.items.len);
+        try imsec.bytes.appendSlice(allocator, string);
+        try imsec.strings.append(allocator, .{ .pos = index, .len = @intCast(string.len) });
+    }
+
+    pub const Index = u32;
+};
+
+const String = struct { pos: u32, len: u32 };
+
+const assert = std.debug.assert;
+const mem = std.mem;
+const std = @import("std");
+
+const Allocator = mem.Allocator;
+const Atom = @import("Atom.zig");
+const Elf = @import("../Elf.zig");
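
InputMergeSection.findSubsection above translates an offset inside the original input section into the subsection that now owns those bytes plus a remaining delta; Object.zig uses it to rewrite symbol values and STT_SECTION relocation addends. A stand-alone sketch of that arithmetic, with starts and lens standing in for the offsets and strings arrays and all values invented:

    const std = @import("std");

    test "translate an input-section offset to (subsection, delta)" {
        const starts = [_]u32{ 0, 6, 11 }; // start offsets of the split entries
        const lens = [_]u32{ 6, 5, 4 }; // lengths of the split entries
        const offset: u32 = 8; // e.g. st_value of a local pointing into the section

        var index: usize = starts.len - 1;
        while (starts[index] > offset) : (index -= 1) {}
        try std.testing.expect(offset < starts[index] + lens[index]);

        try std.testing.expectEqual(@as(usize, 1), index); // second subsection
        try std.testing.expectEqual(@as(u32, 2), offset - starts[index]); // delta
    }
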
src/link/Elf/Object.zig
@@ -15,6 +15,8 @@ comdat_groups: std.ArrayListUnmanaged(Elf.ComdatGroup.Index) = .{},
 comdat_group_data: std.ArrayListUnmanaged(u32) = .{},
 relocs: std.ArrayListUnmanaged(elf.Elf64_Rela) = .{},
 
+merge_sections: std.ArrayListUnmanaged(InputMergeSection.Index) = .{},
+
 fdes: std.ArrayListUnmanaged(Fde) = .{},
 cies: std.ArrayListUnmanaged(Cie) = .{},
 eh_frame_data: std.ArrayListUnmanaged(u8) = .{},
@@ -51,6 +53,7 @@ pub fn deinit(self: *Object, allocator: Allocator) void {
     self.fdes.deinit(allocator);
     self.cies.deinit(allocator);
     self.eh_frame_data.deinit(allocator);
+    self.merge_sections.deinit(allocator);
 }
 
 pub fn parse(self: *Object, elf_file: *Elf) !void {
@@ -242,11 +245,12 @@ fn initAtoms(self: *Object, allocator: Allocator, handle: std.fs.File, elf_file:
                 const relocs = try self.preadRelocsAlloc(allocator, handle, @intCast(i));
                 defer allocator.free(relocs);
                 atom.relocs_section_index = @intCast(i);
-                atom.rel_index = @intCast(self.relocs.items.len);
-                atom.rel_num = @intCast(relocs.len);
+                const rel_index: u32 = @intCast(self.relocs.items.len);
+                const rel_count: u32 = @intCast(relocs.len);
+                try atom.addExtra(.{ .rel_index = rel_index, .rel_count = rel_count }, elf_file);
                 try self.relocs.appendUnalignedSlice(allocator, relocs);
                 if (elf_file.getTarget().cpu.arch == .riscv64) {
-                    sortRelocs(self.relocs.items[atom.rel_index..][0..atom.rel_num]);
+                    sortRelocs(self.relocs.items[rel_index..][0..rel_count]);
                 }
             }
         },
@@ -279,8 +283,7 @@ fn initOutputSection(self: Object, elf_file: *Elf, shdr: elf.Elf64_Shdr) error{O
     const name = blk: {
         const name = self.getString(shdr.sh_name);
         if (elf_file.base.isRelocatable()) break :blk name;
-        if (shdr.sh_flags & elf.SHF_MERGE != 0 and shdr.sh_flags & elf.SHF_STRINGS == 0)
-            break :blk name; // TODO: consider dropping SHF_STRINGS once ICF is implemented
+        if (shdr.sh_flags & elf.SHF_MERGE != 0) break :blk name;
         const sh_name_prefixes: []const [:0]const u8 = &.{
             ".text",       ".data.rel.ro", ".data", ".rodata", ".bss.rel.ro",       ".bss",
             ".init_array", ".fini_array",  ".tbss", ".tdata",  ".gcc_except_table", ".ctors",
@@ -334,7 +337,6 @@ fn skipShdr(self: *Object, index: u32, elf_file: *Elf) bool {
     const name = self.getString(shdr.sh_name);
     const ignore = blk: {
         if (mem.startsWith(u8, name, ".note")) break :blk true;
-        if (mem.startsWith(u8, name, ".comment")) break :blk true;
         if (mem.startsWith(u8, name, ".llvm_addrsig")) break :blk true;
         if (mem.startsWith(u8, name, ".riscv.attributes")) break :blk true; // TODO: riscv attributes
         if (comp.config.debug_format == .strip and shdr.sh_flags & elf.SHF_ALLOC == 0 and
@@ -353,7 +355,7 @@ fn initSymtab(self: *Object, allocator: Allocator, elf_file: *Elf) !void {
         const index = try elf_file.addSymbol();
         self.symbols.appendAssumeCapacity(index);
         const sym_ptr = elf_file.symbol(index);
-        sym_ptr.value = sym.st_value;
+        sym_ptr.value = @intCast(sym.st_value);
         sym_ptr.name_offset = sym.st_name;
         sym_ptr.esym_index = @as(u32, @intCast(i));
         sym_ptr.atom_index = if (sym.st_shndx == elf.SHN_ABS) 0 else self.atoms.items[sym.st_shndx];
@@ -445,13 +447,14 @@ fn parseEhFrame(self: *Object, allocator: Allocator, handle: std.fs.File, shndx:
     while (i < self.fdes.items.len) {
         const fde = self.fdes.items[i];
         const atom = fde.atom(elf_file);
-        atom.fde_start = i;
+        const start = i;
         i += 1;
         while (i < self.fdes.items.len) : (i += 1) {
             const next_fde = self.fdes.items[i];
             if (atom.atom_index != next_fde.atom(elf_file).atom_index) break;
         }
-        atom.fde_end = i;
+        try atom.addExtra(.{ .fde_start = start, .fde_count = i - start }, elf_file);
+        atom.flags.fde = true;
     }
 }
 
@@ -545,7 +548,7 @@ pub fn resolveSymbols(self: *Object, elf_file: *Elf) void {
                 elf.SHN_ABS, elf.SHN_COMMON => 0,
                 else => self.atoms.items[esym.st_shndx],
             };
-            global.value = esym.st_value;
+            global.value = @intCast(esym.st_value);
             global.atom_index = atom_index;
             global.esym_index = esym_index;
             global.file_index = self.index;
@@ -657,6 +660,178 @@ pub fn checkDuplicates(self: *Object, dupes: anytype, elf_file: *Elf) error{OutO
     }
 }
 
+pub fn initMergeSections(self: *Object, elf_file: *Elf) !void {
+    const gpa = elf_file.base.comp.gpa;
+
+    try self.merge_sections.resize(gpa, self.shdrs.items.len);
+    @memset(self.merge_sections.items, 0);
+
+    for (self.shdrs.items, 0..) |shdr, shndx| {
+        if (shdr.sh_flags & elf.SHF_MERGE == 0) continue;
+
+        const atom_index = self.atoms.items[shndx];
+        const atom_ptr = elf_file.atom(atom_index) orelse continue;
+        if (!atom_ptr.flags.alive) continue;
+        if (atom_ptr.relocs(elf_file).len > 0) continue;
+
+        const imsec_idx = try elf_file.addInputMergeSection();
+        const imsec = elf_file.inputMergeSection(imsec_idx).?;
+        self.merge_sections.items[shndx] = imsec_idx;
+
+        imsec.merge_section_index = try elf_file.getOrCreateMergeSection(atom_ptr.name(elf_file), shdr.sh_flags, shdr.sh_type);
+        imsec.atom_index = atom_index;
+
+        const data = try self.codeDecompressAlloc(elf_file, atom_index);
+        defer gpa.free(data);
+
+        if (shdr.sh_flags & elf.SHF_STRINGS != 0) {
+            const sh_entsize: u32 = switch (shdr.sh_entsize) {
+                // According to mold's source code, GHC emits MS sections with sh_entsize = 0.
+                // This actually can also happen for output created with `-r` mode.
+                0 => 1,
+                else => |x| @intCast(x),
+            };
+
+            const isNull = struct {
+                fn isNull(slice: []u8) bool {
+                    for (slice) |x| if (x != 0) return false;
+                    return true;
+                }
+            }.isNull;
+
+            var start: u32 = 0;
+            while (start < data.len) {
+                var end = start;
+                while (end < data.len - sh_entsize and !isNull(data[end .. end + sh_entsize])) : (end += sh_entsize) {}
+                if (!isNull(data[end .. end + sh_entsize])) {
+                    var err = try elf_file.addErrorWithNotes(1);
+                    try err.addMsg(elf_file, "string not null terminated", .{});
+                    try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
+                    return error.MalformedObject;
+                }
+                end += sh_entsize;
+                const string = data[start..end];
+                try imsec.insert(gpa, string);
+                try imsec.offsets.append(gpa, start);
+                start = end;
+            }
+        } else {
+            const sh_entsize: u32 = @intCast(shdr.sh_entsize);
+            if (sh_entsize == 0) continue; // Malformed, don't split but don't error out
+            if (shdr.sh_size % sh_entsize != 0) {
+                var err = try elf_file.addErrorWithNotes(1);
+                try err.addMsg(elf_file, "size not a multiple of sh_entsize", .{});
+                try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
+                return error.MalformedObject;
+            }
+
+            var pos: u32 = 0;
+            while (pos < data.len) : (pos += sh_entsize) {
+                const string = data.ptr[pos..][0..sh_entsize];
+                try imsec.insert(gpa, string);
+                try imsec.offsets.append(gpa, pos);
+            }
+        }
+
+        atom_ptr.flags.alive = false;
+    }
+}
+
+pub fn resolveMergeSubsections(self: *Object, elf_file: *Elf) !void {
+    const gpa = elf_file.base.comp.gpa;
+
+    for (self.merge_sections.items) |index| {
+        const imsec = elf_file.inputMergeSection(index) orelse continue;
+        if (imsec.offsets.items.len == 0) continue;
+        const msec = elf_file.mergeSection(imsec.merge_section_index);
+        const atom_ptr = elf_file.atom(imsec.atom_index).?;
+        const isec = atom_ptr.inputShdr(elf_file);
+
+        try imsec.subsections.resize(gpa, imsec.strings.items.len);
+
+        for (imsec.strings.items, imsec.subsections.items) |str, *imsec_msub| {
+            const string = imsec.bytes.items[str.pos..][0..str.len];
+            const res = try msec.insert(gpa, string);
+            if (!res.found_existing) {
+                const msub_index = try elf_file.addMergeSubsection();
+                const msub = elf_file.mergeSubsection(msub_index);
+                msub.merge_section_index = imsec.merge_section_index;
+                msub.string_index = res.key.pos;
+                msub.alignment = atom_ptr.alignment;
+                msub.size = res.key.len;
+                msub.entsize = math.cast(u32, isec.sh_entsize) orelse return error.Overflow;
+                msub.alive = !elf_file.base.gc_sections or isec.sh_flags & elf.SHF_ALLOC == 0;
+                res.sub.* = msub_index;
+            }
+            imsec_msub.* = res.sub.*;
+        }
+
+        imsec.clearAndFree(gpa);
+    }
+
+    for (self.symtab.items, 0..) |*esym, idx| {
+        const sym_index = self.symbols.items[idx];
+        const sym = elf_file.symbol(sym_index);
+
+        if (esym.st_shndx == elf.SHN_COMMON or esym.st_shndx == elf.SHN_UNDEF or esym.st_shndx == elf.SHN_ABS) continue;
+
+        const imsec_index = self.merge_sections.items[esym.st_shndx];
+        const imsec = elf_file.inputMergeSection(imsec_index) orelse continue;
+        if (imsec.offsets.items.len == 0) continue;
+        const msub_index, const offset = imsec.findSubsection(@intCast(esym.st_value)) orelse {
+            var err = try elf_file.addErrorWithNotes(2);
+            try err.addMsg(elf_file, "invalid symbol value: {x}", .{esym.st_value});
+            try err.addNote(elf_file, "for symbol {s}", .{sym.name(elf_file)});
+            try err.addNote(elf_file, "in {}", .{self.fmtPath()});
+            return error.MalformedObject;
+        };
+
+        try sym.addExtra(.{ .subsection = msub_index }, elf_file);
+        sym.flags.merge_subsection = true;
+        sym.value = offset;
+    }
+
+    for (self.atoms.items) |atom_index| {
+        const atom_ptr = elf_file.atom(atom_index) orelse continue;
+        if (!atom_ptr.flags.alive) continue;
+        const extras = atom_ptr.extra(elf_file) orelse continue;
+        const relocs = self.relocs.items[extras.rel_index..][0..extras.rel_count];
+        for (relocs) |*rel| {
+            const esym = self.symtab.items[rel.r_sym()];
+            if (esym.st_type() != elf.STT_SECTION) continue;
+
+            const imsec_index = self.merge_sections.items[esym.st_shndx];
+            const imsec = elf_file.inputMergeSection(imsec_index) orelse continue;
+            if (imsec.offsets.items.len == 0) continue;
+            const msub_index, const offset = imsec.findSubsection(@intCast(@as(i64, @intCast(esym.st_value)) + rel.r_addend)) orelse {
+                var err = try elf_file.addErrorWithNotes(1);
+                try err.addMsg(elf_file, "invalid relocation at offset 0x{x}", .{rel.r_offset});
+                try err.addNote(elf_file, "in {}:{s}", .{ self.fmtPath(), atom_ptr.name(elf_file) });
+                return error.MalformedObject;
+            };
+            const msub = elf_file.mergeSubsection(msub_index);
+            const msec = msub.mergeSection(elf_file);
+
+            const out_sym_idx: u64 = @intCast(self.symbols.items.len);
+            try self.symbols.ensureUnusedCapacity(gpa, 1);
+            const name = try std.fmt.allocPrint(gpa, "{s}$subsection{d}", .{ msec.name(elf_file), msub_index });
+            defer gpa.free(name);
+            const sym_index = try elf_file.addSymbol();
+            const sym = elf_file.symbol(sym_index);
+            sym.* = .{
+                .value = @bitCast(@as(i64, @intCast(offset)) - rel.r_addend),
+                .name_offset = try self.addString(gpa, name),
+                .esym_index = rel.r_sym(),
+                .file_index = self.index,
+            };
+            try sym.addExtra(.{ .subsection = msub_index }, elf_file);
+            sym.flags.merge_subsection = true;
+            self.symbols.addOneAssumeCapacity().* = sym_index;
+            rel.r_info = (out_sym_idx << 32) | rel.r_type();
+        }
+    }
+}
+
 /// We will create dummy shdrs per each resolved common symbols to make it
 /// play nicely with the rest of the system.
 pub fn convertCommonSymbols(self: *Object, elf_file: *Elf) !void {
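
When resolveMergeSubsections (in the hunk above) finds an STT_SECTION relocation that lands in a merged string, it creates a fresh local symbol (the "{s}$subsection{d}" names above) and repoints the relocation by rewriting r_info, whose upper 32 bits carry the symbol index and lower 32 bits the relocation type on ELF64. A small check of that packing with made-up indices:

    const std = @import("std");

    test "repack Elf64 r_info with a new symbol index" {
        const old_info: u64 = (7 << 32) | 2; // symbol 7, relocation type 2
        const r_type: u32 = @truncate(old_info);
        const new_sym: u64 = 42;
        const new_info = (new_sym << 32) | r_type;
        try std.testing.expectEqual(@as(u64, 42), new_info >> 32);
        try std.testing.expectEqual(@as(u32, 2), @as(u32, @truncate(new_info)));
    }
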
@@ -747,6 +922,11 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
 
     for (self.locals()) |local_index| {
         const local = elf_file.symbol(local_index);
+        if (local.mergeSubsection(elf_file)) |msub| {
+            if (!msub.alive) continue;
+            local.output_section_index = msub.mergeSection(elf_file).output_section_index;
+            continue;
+        }
         const atom = local.atom(elf_file) orelse continue;
         if (!atom.flags.alive) continue;
         local.output_section_index = atom.output_section_index;
@@ -754,11 +934,23 @@ pub fn addAtomsToOutputSections(self: *Object, elf_file: *Elf) !void {
 
     for (self.globals()) |global_index| {
         const global = elf_file.symbol(global_index);
+        if (global.file(elf_file).?.index() != self.index) continue;
+        if (global.mergeSubsection(elf_file)) |msub| {
+            if (!msub.alive) continue;
+            global.output_section_index = msub.mergeSection(elf_file).output_section_index;
+            continue;
+        }
         const atom = global.atom(elf_file) orelse continue;
         if (!atom.flags.alive) continue;
-        if (global.file(elf_file).?.index() != self.index) continue;
         global.output_section_index = atom.output_section_index;
     }
+
+    for (self.symbols.items[self.symtab.items.len..]) |local_index| {
+        const local = elf_file.symbol(local_index);
+        const msub = local.mergeSubsection(elf_file).?;
+        if (!msub.alive) continue;
+        local.output_section_index = msub.mergeSection(elf_file).output_section_index;
+    }
 }
 
 pub fn initRelaSections(self: Object, elf_file: *Elf) !void {
@@ -843,9 +1035,17 @@ pub fn writeAr(self: Object, elf_file: *Elf, writer: anytype) !void {
 }
 
 pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
+    const isAlive = struct {
+        fn isAlive(sym: *const Symbol, ctx: *Elf) bool {
+            if (sym.mergeSubsection(ctx)) |msub| return msub.alive;
+            if (sym.atom(ctx)) |atom_ptr| return atom_ptr.flags.alive;
+            return true;
+        }
+    }.isAlive;
+
     for (self.locals()) |local_index| {
         const local = elf_file.symbol(local_index);
-        if (local.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
+        if (!isAlive(local, elf_file)) continue;
         const esym = local.elfSym(elf_file);
         switch (esym.st_type()) {
             elf.STT_SECTION => continue,
@@ -853,7 +1053,7 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
             else => {},
         }
         local.flags.output_symtab = true;
-        try local.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
+        try local.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
         self.output_symtab_ctx.nlocals += 1;
         self.output_symtab_ctx.strsize += @as(u32, @intCast(local.name(elf_file).len)) + 1;
     }
@@ -862,13 +1062,13 @@ pub fn updateSymtabSize(self: *Object, elf_file: *Elf) !void {
         const global = elf_file.symbol(global_index);
         const file_ptr = global.file(elf_file) orelse continue;
         if (file_ptr.index() != self.index) continue;
-        if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
+        if (!isAlive(global, elf_file)) continue;
         global.flags.output_symtab = true;
         if (global.isLocal(elf_file)) {
-            try global.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
+            try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
             self.output_symtab_ctx.nlocals += 1;
         } else {
-            try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
+            try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
             self.output_symtab_ctx.nglobals += 1;
         }
         self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
@@ -902,14 +1102,16 @@ pub fn writeSymtab(self: Object, elf_file: *Elf) void {
 
 pub fn locals(self: Object) []const Symbol.Index {
     if (self.symbols.items.len == 0) return &[0]Symbol.Index{};
-    const end = self.first_global orelse self.symbols.items.len;
+    assert(self.symbols.items.len >= self.symtab.items.len);
+    const end = self.first_global orelse self.symtab.items.len;
     return self.symbols.items[0..end];
 }
 
 pub fn globals(self: Object) []const Symbol.Index {
     if (self.symbols.items.len == 0) return &[0]Symbol.Index{};
-    const start = self.first_global orelse self.symbols.items.len;
-    return self.symbols.items[start..];
+    assert(self.symbols.items.len >= self.symtab.items.len);
+    const start = self.first_global orelse self.symtab.items.len;
+    return self.symbols.items[start..self.symtab.items.len];
 }
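
The tightened bounds above encode the symbol-list layout this commit introduces: indices [0, first_global) are the parsed locals, [first_global, symtab.len) the parsed globals, and everything past symtab.len are the synthetic "$subsection" locals appended by the relocation-rewriting pass (and walked separately in addAtomsToOutputSections). A small sketch of that invariant with made-up counts:

const std = @import("std");

test "symbols list layout after merge rewriting" {
    // Hypothetical object: 3 locals and 2 globals from the input symtab,
    // plus 2 synthetic "$subsection" locals appended afterwards.
    const first_global: usize = 3;
    const symtab_len: usize = 5;
    var symbols = std.ArrayList(u32).init(std.testing.allocator);
    defer symbols.deinit();
    try symbols.appendSlice(&[_]u32{ 10, 11, 12, 20, 21, 30, 31 });

    const locals = symbols.items[0..first_global];
    const globals = symbols.items[first_global..symtab_len];
    const merge_locals = symbols.items[symtab_len..];
    try std.testing.expectEqual(@as(usize, 3), locals.len);
    try std.testing.expectEqual(@as(usize, 2), globals.len);
    try std.testing.expectEqual(@as(usize, 2), merge_locals.len);
}
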
 
 /// Returns atom's code and optionally uncompresses data if required (for compressed sections).
@@ -954,6 +1156,14 @@ pub fn getString(self: Object, off: u32) [:0]const u8 {
     return mem.sliceTo(@as([*:0]const u8, @ptrCast(self.strtab.items.ptr + off)), 0);
 }
 
+fn addString(self: *Object, allocator: Allocator, str: []const u8) !u32 {
+    const off: u32 = @intCast(self.strtab.items.len);
+    try self.strtab.ensureUnusedCapacity(allocator, str.len + 1);
+    self.strtab.appendSliceAssumeCapacity(str);
+    self.strtab.appendAssumeCapacity(0);
+    return off;
+}
+
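addString together with the existing getString forms a tiny NUL-terminated string table (offsets in, slices out). A self-contained sketch of the round trip, using an ArrayListUnmanaged(u8) just like Object.strtab:

const std = @import("std");

fn addString(strtab: *std.ArrayListUnmanaged(u8), gpa: std.mem.Allocator, str: []const u8) !u32 {
    const off: u32 = @intCast(strtab.items.len);
    try strtab.ensureUnusedCapacity(gpa, str.len + 1);
    strtab.appendSliceAssumeCapacity(str);
    strtab.appendAssumeCapacity(0);
    return off;
}

test "strtab round trip" {
    const gpa = std.testing.allocator;
    var strtab = std.ArrayListUnmanaged(u8){};
    defer strtab.deinit(gpa);

    const a = try addString(&strtab, gpa, ".rodata.str1.1$subsection0");
    const b = try addString(&strtab, gpa, ".rodata.str1.1$subsection1");
    try std.testing.expectEqual(@as(u32, 0), a);

    // getString reads back up to the NUL terminator.
    const back = std.mem.sliceTo(@as([*:0]const u8, @ptrCast(strtab.items.ptr + b)), 0);
    try std.testing.expectEqualStrings(".rodata.str1.1$subsection1", back);
}
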
 /// Caller owns the memory.
 fn preadShdrContentsAlloc(self: Object, allocator: Allocator, handle: std.fs.File, index: u32) ![]u8 {
     assert(index < self.shdrs.items.len);
@@ -1159,5 +1369,6 @@ const Cie = eh_frame.Cie;
 const Elf = @import("../Elf.zig");
 const Fde = eh_frame.Fde;
 const File = @import("file.zig").File;
+const InputMergeSection = @import("merge_section.zig").InputMergeSection;
 const Symbol = @import("Symbol.zig");
 const Alignment = Atom.Alignment;
src/link/Elf/relocatable.zig
@@ -34,12 +34,16 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
     // First, we flush the relocatable object file generated with our backends.
     if (elf_file.zigObjectPtr()) |zig_object| {
         zig_object.resolveSymbols(elf_file);
+        try elf_file.addCommentString();
+        try elf_file.finalizeMergeSections();
         zig_object.claimUnresolvedObject(elf_file);
 
+        try elf_file.initMergeSections();
         try elf_file.initSymtab();
         try elf_file.initShStrtab();
         try elf_file.sortShdrs();
         try zig_object.addAtomsToRelaSections(elf_file);
+        try elf_file.updateMergeSectionSizes();
         try updateSectionSizes(elf_file);
 
         try allocateAllocSections(elf_file);
@@ -49,6 +53,7 @@ pub fn flushStaticLib(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]co
             state_log.debug("{}", .{elf_file.dumpState()});
         }
 
+        try elf_file.writeMergeSections();
         try writeSyntheticSections(elf_file);
         try elf_file.writeShdrTable();
         try elf_file.writeElfHeader();
@@ -179,9 +184,13 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
     // input Object files.
     elf_file.resolveSymbols();
     elf_file.markEhFrameAtomsDead();
+    try elf_file.resolveMergeSections();
+    try elf_file.addCommentString();
+    try elf_file.finalizeMergeSections();
     claimUnresolved(elf_file);
 
     try initSections(elf_file);
+    try elf_file.initMergeSections();
     try elf_file.sortShdrs();
     if (elf_file.zigObjectPtr()) |zig_object| {
         try zig_object.addAtomsToRelaSections(elf_file);
@@ -191,6 +200,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
         try object.addAtomsToOutputSections(elf_file);
         try object.addAtomsToRelaSections(elf_file);
     }
+    try elf_file.updateMergeSectionSizes();
     try updateSectionSizes(elf_file);
 
     try allocateAllocSections(elf_file);
@@ -201,6 +211,7 @@ pub fn flushObject(elf_file: *Elf, comp: *Compilation, module_obj_path: ?[]const
     }
 
     try writeAtoms(elf_file);
+    try elf_file.writeMergeSections();
     try writeSyntheticSections(elf_file);
     try elf_file.writeShdrTable();
     try elf_file.writeElfHeader();
@@ -328,7 +339,7 @@ fn updateSectionSizes(elf_file: *Elf) !void {
             if (!atom_ptr.flags.alive) continue;
             const offset = atom_ptr.alignment.forward(shdr.sh_size);
             const padding = offset - shdr.sh_size;
-            atom_ptr.value = offset;
+            atom_ptr.value = @intCast(offset);
             shdr.sh_size += padding + atom_ptr.size;
             shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
         }
@@ -434,7 +445,7 @@ fn writeAtoms(elf_file: *Elf) !void {
             const atom_ptr = elf_file.atom(atom_index).?;
             assert(atom_ptr.flags.alive);
 
-            const offset = math.cast(usize, atom_ptr.value - shdr.sh_addr - base_offset) orelse
+            const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(shdr.sh_addr - base_offset))) orelse
                 return error.Overflow;
             const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
 
src/link/Elf/SharedObject.zig
@@ -231,7 +231,7 @@ pub fn resolveSymbols(self: *SharedObject, elf_file: *Elf) void {
 
         const global = elf_file.symbol(index);
         if (self.asFile().symbolRank(this_sym, false) < global.symbolRank(elf_file)) {
-            global.value = this_sym.st_value;
+            global.value = @intCast(this_sym.st_value);
             global.atom_index = 0;
             global.esym_index = esym_index;
             global.version_index = self.versyms.items[esym_index];
@@ -269,7 +269,7 @@ pub fn updateSymtabSize(self: *SharedObject, elf_file: *Elf) !void {
         if (file_ptr.index() != self.index) continue;
         if (global.isLocal(elf_file)) continue;
         global.flags.output_symtab = true;
-        try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
+        try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
         self.output_symtab_ctx.nglobals += 1;
         self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
     }
src/link/Elf/Symbol.zig
@@ -1,7 +1,7 @@
 //! Represents a defined symbol.
 
 /// Allocated address value of this symbol.
-value: u64 = 0,
+value: i64 = 0,
 
 /// Offset into the linker's string table.
 name_offset: u32 = 0,
@@ -14,7 +14,7 @@ file_index: File.Index = 0,
 /// Use `atom` to get the pointer to the atom.
 atom_index: Atom.Index = 0,
 
-/// Assigned output section index for this atom.
+/// Assigned output section index for this symbol.
 output_section_index: u32 = 0,
 
 /// Index of the source symbol this symbol references.
@@ -33,7 +33,8 @@ extra_index: u32 = 0,
 pub fn isAbs(symbol: Symbol, elf_file: *Elf) bool {
     const file_ptr = symbol.file(elf_file).?;
     if (file_ptr == .shared_object) return symbol.elfSym(elf_file).st_shndx == elf.SHN_ABS;
-    return !symbol.flags.import and symbol.atom(elf_file) == null and symbol.outputShndx() == null and
+    return !symbol.flags.import and symbol.atom(elf_file) == null and
+        symbol.mergeSubsection(elf_file) == null and symbol.outputShndx() == null and
         file_ptr != .linker_defined;
 }
 
@@ -70,6 +71,12 @@ pub fn atom(symbol: Symbol, elf_file: *Elf) ?*Atom {
     return elf_file.atom(symbol.atom_index);
 }
 
+pub fn mergeSubsection(symbol: Symbol, elf_file: *Elf) ?*MergeSubsection {
+    if (!symbol.flags.merge_subsection) return null;
+    const extras = symbol.extra(elf_file).?;
+    return elf_file.mergeSubsection(extras.subsection);
+}
+
 pub fn file(symbol: Symbol, elf_file: *Elf) ?File {
     return elf_file.file(symbol.file_index);
 }
@@ -92,7 +99,11 @@ pub fn symbolRank(symbol: Symbol, elf_file: *Elf) u32 {
     return file_ptr.symbolRank(sym, in_archive);
 }
 
-pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) u64 {
+pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf) i64 {
+    if (symbol.mergeSubsection(elf_file)) |msub| {
+        if (!msub.alive) return 0;
+        return msub.address(elf_file) + symbol.value;
+    }
     if (symbol.flags.has_copy_rel) {
         return symbol.copyRelAddress(elf_file);
     }
@@ -108,19 +119,23 @@ pub fn address(symbol: Symbol, opts: struct { plt: bool = true }, elf_file: *Elf
         if (!atom_ptr.flags.alive) {
             if (mem.eql(u8, atom_ptr.name(elf_file), ".eh_frame")) {
                 const sym_name = symbol.name(elf_file);
+                const sh_addr, const sh_size = blk: {
+                    const shndx = elf_file.eh_frame_section_index orelse break :blk .{ 0, 0 };
+                    const shdr = elf_file.shdrs.items[shndx];
+                    break :blk .{ shdr.sh_addr, shdr.sh_size };
+                };
                 if (mem.startsWith(u8, sym_name, "__EH_FRAME_BEGIN__") or
                     mem.startsWith(u8, sym_name, "__EH_FRAME_LIST__") or
                     mem.startsWith(u8, sym_name, ".eh_frame_seg") or
                     symbol.elfSym(elf_file).st_type() == elf.STT_SECTION)
                 {
-                    return elf_file.shdrs.items[elf_file.eh_frame_section_index.?].sh_addr;
+                    return @intCast(sh_addr);
                 }
 
                 if (mem.startsWith(u8, sym_name, "__FRAME_END__") or
                     mem.startsWith(u8, sym_name, "__EH_FRAME_LIST_END__"))
                 {
-                    const shdr = elf_file.shdrs.items[elf_file.eh_frame_section_index.?];
-                    return shdr.sh_addr + shdr.sh_size;
+                    return @intCast(sh_addr + sh_size);
                 }
 
                 // TODO I think we potentially should error here
@@ -143,65 +158,57 @@ pub fn outputSymtabIndex(symbol: Symbol, elf_file: *Elf) ?u32 {
     return if (symbol.isLocal(elf_file)) idx + symtab_ctx.ilocal else idx + symtab_ctx.iglobal;
 }
 
-pub fn setOutputSymtabIndex(symbol: *Symbol, index: u32, elf_file: *Elf) !void {
-    if (symbol.extra(elf_file)) |extras| {
-        var new_extras = extras;
-        new_extras.symtab = index;
-        symbol.setExtra(new_extras, elf_file);
-    } else try symbol.addExtra(.{ .symtab = index }, elf_file);
-}
-
-pub fn gotAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn gotAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_got) return 0;
     const extras = symbol.extra(elf_file).?;
     const entry = elf_file.got.entries.items[extras.got];
     return entry.address(elf_file);
 }
 
-pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn pltGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!(symbol.flags.has_plt and symbol.flags.has_got)) return 0;
     const extras = symbol.extra(elf_file).?;
     const shdr = elf_file.shdrs.items[elf_file.plt_got_section_index.?];
     const cpu_arch = elf_file.getTarget().cpu.arch;
-    return shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch);
+    return @intCast(shdr.sh_addr + extras.plt_got * PltGotSection.entrySize(cpu_arch));
 }
 
-pub fn pltAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn pltAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_plt) return 0;
     const extras = symbol.extra(elf_file).?;
     const shdr = elf_file.shdrs.items[elf_file.plt_section_index.?];
     const cpu_arch = elf_file.getTarget().cpu.arch;
-    return shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch);
+    return @intCast(shdr.sh_addr + extras.plt * PltSection.entrySize(cpu_arch) + PltSection.preambleSize(cpu_arch));
 }
 
-pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn gotPltAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_plt) return 0;
     const extras = symbol.extra(elf_file).?;
     const shdr = elf_file.shdrs.items[elf_file.got_plt_section_index.?];
-    return shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size;
+    return @intCast(shdr.sh_addr + extras.plt * 8 + GotPltSection.preamble_size);
 }
 
-pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn copyRelAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_copy_rel) return 0;
     const shdr = elf_file.shdrs.items[elf_file.copy_rel_section_index.?];
-    return shdr.sh_addr + symbol.value;
+    return @as(i64, @intCast(shdr.sh_addr)) + symbol.value;
 }
 
-pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn tlsGdAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_tlsgd) return 0;
     const extras = symbol.extra(elf_file).?;
     const entry = elf_file.got.entries.items[extras.tlsgd];
     return entry.address(elf_file);
 }
 
-pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn gotTpAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_gottp) return 0;
     const extras = symbol.extra(elf_file).?;
     const entry = elf_file.got.entries.items[extras.gottp];
     return entry.address(elf_file);
 }
 
-pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn tlsDescAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_tlsdesc) return 0;
     const extras = symbol.extra(elf_file).?;
     const entry = elf_file.got.entries.items[extras.tlsdesc];
@@ -221,7 +228,7 @@ pub fn getOrCreateZigGotEntry(symbol: *Symbol, symbol_index: Index, elf_file: *E
     return .{ .found_existing = false, .index = index };
 }
 
-pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) u64 {
+pub fn zigGotAddress(symbol: Symbol, elf_file: *Elf) i64 {
     if (!symbol.flags.has_zig_got) return 0;
     const extras = symbol.extra(elf_file).?;
     return elf_file.zig_got.entryAddress(extras.zig_got, elf_file);
@@ -240,8 +247,31 @@ pub fn dsoAlignment(symbol: Symbol, elf_file: *Elf) !u64 {
         @min(alignment, try std.math.powi(u64, 2, @ctz(esym.st_value)));
 }
 
-pub fn addExtra(symbol: *Symbol, extras: Extra, elf_file: *Elf) !void {
-    symbol.extra_index = try elf_file.addSymbolExtra(extras);
+const AddExtraOpts = struct {
+    got: ?u32 = null,
+    plt: ?u32 = null,
+    plt_got: ?u32 = null,
+    dynamic: ?u32 = null,
+    symtab: ?u32 = null,
+    copy_rel: ?u32 = null,
+    tlsgd: ?u32 = null,
+    gottp: ?u32 = null,
+    tlsdesc: ?u32 = null,
+    zig_got: ?u32 = null,
+    subsection: ?u32 = null,
+};
+
+pub fn addExtra(symbol: *Symbol, opts: AddExtraOpts, elf_file: *Elf) !void {
+    if (symbol.extra(elf_file) == null) {
+        symbol.extra_index = try elf_file.addSymbolExtra(.{});
+    }
+    var extras = symbol.extra(elf_file).?;
+    inline for (@typeInfo(@TypeOf(opts)).Struct.fields) |field| {
+        if (@field(opts, field.name)) |x| {
+            @field(extras, field.name) = x;
+        }
+    }
+    symbol.setExtra(extras, elf_file);
 }
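
addExtra now doubles as the generic setter that the removed setOutputSymtabIndex (and the per-flag setters below in synthetic_sections.zig) used to be: fields passed in opts overwrite the stored extras, fields left at null are preserved. A trimmed-down sketch of that inline-for merge:

const std = @import("std");

const Extra = struct { got: u32 = 0, symtab: u32 = 0, subsection: u32 = 0 };
const AddExtraOpts = struct { got: ?u32 = null, symtab: ?u32 = null, subsection: ?u32 = null };

fn withExtras(current: Extra, opts: AddExtraOpts) Extra {
    var extras = current;
    inline for (@typeInfo(AddExtraOpts).Struct.fields) |field| {
        if (@field(opts, field.name)) |x| {
            @field(extras, field.name) = x;
        }
    }
    return extras;
}

test "only the provided fields are overwritten" {
    var extras: Extra = .{ .got = 7 };
    extras = withExtras(extras, .{ .subsection = 3 });
    try std.testing.expectEqual(@as(u32, 7), extras.got);
    try std.testing.expectEqual(@as(u32, 0), extras.symtab);
    try std.testing.expectEqual(@as(u32, 3), extras.subsection);
}
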
 
 pub fn extra(symbol: Symbol, elf_file: *Elf) ?Extra {
@@ -266,6 +296,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
         if (symbol.flags.has_copy_rel) break :blk @intCast(elf_file.copy_rel_section_index.?);
         if (file_ptr == .shared_object or esym.st_shndx == elf.SHN_UNDEF) break :blk elf.SHN_UNDEF;
         if (elf_file.base.isRelocatable() and esym.st_shndx == elf.SHN_COMMON) break :blk elf.SHN_COMMON;
+        if (symbol.mergeSubsection(elf_file)) |msub| break :blk @intCast(msub.mergeSection(elf_file).output_section_index);
         if (symbol.atom(elf_file) == null and file_ptr != .linker_defined) break :blk elf.SHN_ABS;
         break :blk @intCast(symbol.outputShndx() orelse elf.SHN_UNDEF);
     };
@@ -284,7 +315,7 @@ pub fn setOutputSym(symbol: Symbol, elf_file: *Elf, out: *elf.Elf64_Sym) void {
     out.st_info = (st_bind << 4) | st_type;
     out.st_other = esym.st_other;
     out.st_shndx = st_shndx;
-    out.st_value = st_value;
+    out.st_value = @intCast(st_value);
     out.st_size = esym.st_size;
 }
 
@@ -436,6 +467,9 @@ pub const Flags = packed struct {
     /// TODO this is really not needed if only we operated on esyms between
     /// codegen and ZigObject.
     is_tls: bool = false,
+
+    /// Whether the symbol points into a merge subsection.
+    merge_subsection: bool = false,
 };
 
 pub const Extra = struct {
@@ -449,6 +483,7 @@ pub const Extra = struct {
     gottp: u32 = 0,
     tlsdesc: u32 = 0,
     zig_got: u32 = 0,
+    subsection: u32 = 0,
 };
 
 pub const Index = u32;
@@ -465,6 +500,7 @@ const File = @import("file.zig").File;
 const GotSection = synthetic_sections.GotSection;
 const GotPltSection = synthetic_sections.GotPltSection;
 const LinkerDefined = @import("LinkerDefined.zig");
+const MergeSubsection = @import("merge_section.zig").MergeSubsection;
 const Object = @import("Object.zig");
 const PltSection = synthetic_sections.PltSection;
 const PltGotSection = synthetic_sections.PltGotSection;
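
How the signed pieces fit together: the synthetic symbol created in Object.zig stores offset - r_addend as its value, and address() above adds that value to the live subsection's output address, so when relocation resolution computes S + A the addend cancels and the reference lands on the merged copy of the data. A small arithmetic sketch with made-up numbers (note that the intermediate value is negative, which is exactly why value became i64):

const std = @import("std");

test "merge subsection symbol value round trip" {
    // Hypothetical numbers: the referenced string sits 24 bytes into the
    // input section (section-symbol addend = 24), inside a subsection that
    // starts at input offset 16, i.e. 8 bytes into that subsection.
    const r_addend: i64 = 24;
    const offset_in_subsection: i64 = 8;
    // Address the deduplicated subsection was assigned in the output.
    const msub_address: i64 = 0x20_0040;

    // Object.zig stores offset - addend, which is why `value` must be signed.
    const sym_value = offset_in_subsection - r_addend;
    try std.testing.expectEqual(@as(i64, -16), sym_value);

    // Symbol.address() = msub.address() + value; relocation resolution then
    // computes S + A and lands back on the merged copy of the string.
    const s = msub_address + sym_value;
    try std.testing.expectEqual(msub_address + offset_in_subsection, s + r_addend);
}
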
src/link/Elf/synthetic_sections.zig
@@ -259,11 +259,7 @@ pub const ZigGotSection = struct {
         if (elf_file.isEffectivelyDynLib() or (elf_file.base.isExe() and comp.config.pie)) {
             zig_got.flags.needs_rela = true;
         }
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.zig_got = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .zig_got = index }, elf_file);
+        try symbol.addExtra(.{ .zig_got = index }, elf_file);
         return index;
     }
 
@@ -274,11 +270,11 @@ pub const ZigGotSection = struct {
         return shdr.sh_offset + @as(u64, entry_size) * index;
     }
 
-    pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) u64 {
+    pub fn entryAddress(zig_got: ZigGotSection, index: Index, elf_file: *Elf) i64 {
         _ = zig_got;
         const entry_size = elf_file.archPtrWidthBytes();
         const shdr = elf_file.shdrs.items[elf_file.zig_got_section_index.?];
-        return shdr.sh_addr + @as(u64, entry_size) * index;
+        return @as(i64, @intCast(shdr.sh_addr)) + entry_size * index;
     }
 
     pub fn size(zig_got: ZigGotSection, elf_file: *Elf) usize {
@@ -295,23 +291,23 @@ pub const ZigGotSection = struct {
         const target = elf_file.getTarget();
         const endian = target.cpu.arch.endian();
         const off = zig_got.entryOffset(index, elf_file);
-        const vaddr = zig_got.entryAddress(index, elf_file);
+        const vaddr: u64 = @intCast(zig_got.entryAddress(index, elf_file));
         const entry = zig_got.entries.items[index];
         const value = elf_file.symbol(entry).address(.{}, elf_file);
         switch (entry_size) {
             2 => {
                 var buf: [2]u8 = undefined;
-                std.mem.writeInt(u16, &buf, @as(u16, @intCast(value)), endian);
+                std.mem.writeInt(u16, &buf, @intCast(value), endian);
                 try elf_file.base.file.?.pwriteAll(&buf, off);
             },
             4 => {
                 var buf: [4]u8 = undefined;
-                std.mem.writeInt(u32, &buf, @as(u32, @intCast(value)), endian);
+                std.mem.writeInt(u32, &buf, @intCast(value), endian);
                 try elf_file.base.file.?.pwriteAll(&buf, off);
             },
             8 => {
                 var buf: [8]u8 = undefined;
-                std.mem.writeInt(u64, &buf, value, endian);
+                std.mem.writeInt(u64, &buf, @intCast(value), endian);
                 try elf_file.base.file.?.pwriteAll(&buf, off);
 
                 if (elf_file.base.child_pid) |pid| {
@@ -360,9 +356,9 @@ pub const ZigGotSection = struct {
             const symbol = elf_file.symbol(entry);
             const offset = symbol.zigGotAddress(elf_file);
             elf_file.addRelaDynAssumeCapacity(.{
-                .offset = offset,
+                .offset = @intCast(offset),
                 .type = relocation.encode(.rel, cpu_arch),
-                .addend = @intCast(symbol.address(.{ .plt = false }, elf_file)),
+                .addend = symbol.address(.{ .plt = false }, elf_file),
             });
         }
     }
@@ -390,7 +386,7 @@ pub const ZigGotSection = struct {
                 .st_info = elf.STT_OBJECT,
                 .st_other = 0,
                 .st_shndx = @intCast(elf_file.zig_got_section_index.?),
-                .st_value = st_value,
+                .st_value = @intCast(st_value),
                 .st_size = st_size,
             };
         }
@@ -461,10 +457,10 @@ pub const GotSection = struct {
             };
         }
 
-        pub fn address(entry: Entry, elf_file: *Elf) u64 {
-            const ptr_bytes = @as(u64, elf_file.archPtrWidthBytes());
+        pub fn address(entry: Entry, elf_file: *Elf) i64 {
+            const ptr_bytes = elf_file.archPtrWidthBytes();
             const shdr = &elf_file.shdrs.items[elf_file.got_section_index.?];
-            return shdr.sh_addr + @as(u64, entry.cell_index) * ptr_bytes;
+            return @as(i64, @intCast(shdr.sh_addr)) + entry.cell_index * ptr_bytes;
         }
     };
 
@@ -499,11 +495,7 @@ pub const GotSection = struct {
         {
             got.flags.needs_rela = true;
         }
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.got = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .got = index }, elf_file);
+        try symbol.addExtra(.{ .got = index }, elf_file);
         return index;
     }
 
@@ -529,11 +521,7 @@ pub const GotSection = struct {
         const symbol = elf_file.symbol(sym_index);
         symbol.flags.has_tlsgd = true;
         if (symbol.flags.import or elf_file.isEffectivelyDynLib()) got.flags.needs_rela = true;
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.tlsgd = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .tlsgd = index }, elf_file);
+        try symbol.addExtra(.{ .tlsgd = index }, elf_file);
     }
 
     pub fn addGotTpSymbol(got: *GotSection, sym_index: Symbol.Index, elf_file: *Elf) !void {
@@ -546,11 +534,7 @@ pub const GotSection = struct {
         const symbol = elf_file.symbol(sym_index);
         symbol.flags.has_gottp = true;
         if (symbol.flags.import or elf_file.isEffectivelyDynLib()) got.flags.needs_rela = true;
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.gottp = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .gottp = index }, elf_file);
+        try symbol.addExtra(.{ .gottp = index }, elf_file);
     }
 
     pub fn addTlsDescSymbol(got: *GotSection, sym_index: Symbol.Index, elf_file: *Elf) !void {
@@ -563,11 +547,7 @@ pub const GotSection = struct {
         const symbol = elf_file.symbol(sym_index);
         symbol.flags.has_tlsdesc = true;
         got.flags.needs_rela = true;
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.tlsdesc = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .tlsdesc = index }, elf_file);
+        try symbol.addExtra(.{ .tlsdesc = index }, elf_file);
     }
 
     pub fn size(got: GotSection, elf_file: *Elf) usize {
@@ -628,8 +608,7 @@ pub const GotSection = struct {
                             0;
                         try writeInt(offset, elf_file, writer);
                     } else {
-                        const offset = @as(i64, @intCast(symbol.?.address(.{}, elf_file))) -
-                            @as(i64, @intCast(elf_file.tpAddress()));
+                        const offset = symbol.?.address(.{}, elf_file) - elf_file.tpAddress();
                         try writeInt(offset, elf_file, writer);
                     }
                 },
@@ -640,7 +619,7 @@ pub const GotSection = struct {
                     } else {
                         try writeInt(0, elf_file, writer);
                         const offset = if (apply_relocs)
-                            @as(i64, @intCast(symbol.?.address(.{}, elf_file))) - @as(i64, @intCast(elf_file.tlsAddress()))
+                            symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()
                         else
                             0;
                         try writeInt(offset, elf_file, writer);
@@ -666,7 +645,7 @@ pub const GotSection = struct {
 
             switch (entry.tag) {
                 .got => {
-                    const offset = symbol.?.gotAddress(elf_file);
+                    const offset: u64 = @intCast(symbol.?.gotAddress(elf_file));
                     if (symbol.?.flags.import) {
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
@@ -679,7 +658,7 @@ pub const GotSection = struct {
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
                             .type = relocation.encode(.irel, cpu_arch),
-                            .addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
+                            .addend = symbol.?.address(.{ .plt = false }, elf_file),
                         });
                         continue;
                     }
@@ -689,14 +668,14 @@ pub const GotSection = struct {
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
                             .type = relocation.encode(.rel, cpu_arch),
-                            .addend = @intCast(symbol.?.address(.{ .plt = false }, elf_file)),
+                            .addend = symbol.?.address(.{ .plt = false }, elf_file),
                         });
                     }
                 },
 
                 .tlsld => {
                     if (is_dyn_lib) {
-                        const offset = entry.address(elf_file);
+                        const offset: u64 = @intCast(entry.address(elf_file));
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
                             .type = relocation.encode(.dtpmod, cpu_arch),
@@ -705,7 +684,7 @@ pub const GotSection = struct {
                 },
 
                 .tlsgd => {
-                    const offset = symbol.?.tlsGdAddress(elf_file);
+                    const offset: u64 = @intCast(symbol.?.tlsGdAddress(elf_file));
                     if (symbol.?.flags.import) {
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
@@ -727,7 +706,7 @@ pub const GotSection = struct {
                 },
 
                 .gottp => {
-                    const offset = symbol.?.gotTpAddress(elf_file);
+                    const offset: u64 = @intCast(symbol.?.gotTpAddress(elf_file));
                     if (symbol.?.flags.import) {
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
@@ -738,18 +717,18 @@ pub const GotSection = struct {
                         elf_file.addRelaDynAssumeCapacity(.{
                             .offset = offset,
                             .type = relocation.encode(.tpoff, cpu_arch),
-                            .addend = @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
+                            .addend = symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
                         });
                     }
                 },
 
                 .tlsdesc => {
-                    const offset = symbol.?.tlsDescAddress(elf_file);
+                    const offset: u64 = @intCast(symbol.?.tlsDescAddress(elf_file));
                     elf_file.addRelaDynAssumeCapacity(.{
                         .offset = offset,
                         .sym = if (symbol.?.flags.import) extra.?.dynamic else 0,
                         .type = relocation.encode(.tlsdesc, cpu_arch),
-                        .addend = if (symbol.?.flags.import) 0 else @intCast(symbol.?.address(.{}, elf_file) - elf_file.tlsAddress()),
+                        .addend = if (symbol.?.flags.import) 0 else symbol.?.address(.{}, elf_file) - elf_file.tlsAddress(),
                     });
                 },
             }
@@ -826,7 +805,7 @@ pub const GotSection = struct {
                 .st_info = elf.STT_OBJECT,
                 .st_other = 0,
                 .st_shndx = @intCast(elf_file.got_section_index.?),
-                .st_value = st_value,
+                .st_value = @intCast(st_value),
                 .st_size = st_size,
             };
         }
@@ -877,11 +856,7 @@ pub const PltSection = struct {
         const index = @as(u32, @intCast(plt.symbols.items.len));
         const symbol = elf_file.symbol(sym_index);
         symbol.flags.has_plt = true;
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.plt = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .plt = index }, elf_file);
+        try symbol.addExtra(.{ .plt = index }, elf_file);
         try plt.symbols.append(gpa, sym_index);
     }
 
@@ -924,7 +899,7 @@ pub const PltSection = struct {
             const sym = elf_file.symbol(sym_index);
             assert(sym.flags.import);
             const extra = sym.extra(elf_file).?;
-            const r_offset = sym.gotPltAddress(elf_file);
+            const r_offset: u64 = @intCast(sym.gotPltAddress(elf_file));
             const r_sym: u64 = extra.dynamic;
             const r_type = relocation.encode(.jump_slot, cpu_arch);
             elf_file.rela_plt.appendAssumeCapacity(.{
@@ -960,7 +935,7 @@ pub const PltSection = struct {
                 .st_info = elf.STT_FUNC,
                 .st_other = 0,
                 .st_shndx = @intCast(elf_file.plt_section_index.?),
-                .st_value = sym.pltAddress(elf_file),
+                .st_value = @intCast(sym.pltAddress(elf_file)),
                 .st_size = entrySize(cpu_arch),
             };
         }
@@ -1033,13 +1008,13 @@ pub const PltSection = struct {
     const aarch64 = struct {
         fn write(plt: PltSection, elf_file: *Elf, writer: anytype) !void {
             {
-                const plt_addr = elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr;
-                const got_plt_addr = elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr;
+                const plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.plt_section_index.?].sh_addr);
+                const got_plt_addr: i64 = @intCast(elf_file.shdrs.items[elf_file.got_plt_section_index.?].sh_addr);
                 // TODO: relax if possible
                 // .got.plt[2]
                 const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
-                const ldr_off = try math.divExact(u12, @truncate(got_plt_addr + 16), 8);
-                const add_off: u12 = @truncate(got_plt_addr + 16);
+                const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(got_plt_addr + 16))), 8);
+                const add_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
 
                 const preamble = &[_]Instruction{
                     Instruction.stp(
@@ -1067,8 +1042,8 @@ pub const PltSection = struct {
                 const target_addr = sym.gotPltAddress(elf_file);
                 const source_addr = sym.pltAddress(elf_file);
                 const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
-                const ldr_off = try math.divExact(u12, @truncate(target_addr), 8);
-                const add_off: u12 = @truncate(target_addr);
+                const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
+                const add_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
                 const insts = &[_]Instruction{
                     Instruction.adrp(.x16, pages),
                     Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
@@ -1101,7 +1076,7 @@ pub const GotPltSection = struct {
         {
             // [0]: _DYNAMIC
             const symbol = elf_file.symbol(elf_file.dynamic_index.?);
-            try writer.writeInt(u64, symbol.address(.{}, elf_file), .little);
+            try writer.writeInt(u64, @intCast(symbol.address(.{}, elf_file)), .little);
         }
         // [1]: 0x0
         // [2]: 0x0
@@ -1132,11 +1107,7 @@ pub const PltGotSection = struct {
         const symbol = elf_file.symbol(sym_index);
         symbol.flags.has_plt = true;
         symbol.flags.has_got = true;
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.plt_got = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .plt_got = index }, elf_file);
+        try symbol.addExtra(.{ .plt_got = index }, elf_file);
         try plt_got.symbols.append(gpa, sym_index);
     }
 
@@ -1181,7 +1152,7 @@ pub const PltGotSection = struct {
                 .st_info = elf.STT_FUNC,
                 .st_other = 0,
                 .st_shndx = @intCast(elf_file.plt_got_section_index.?),
-                .st_value = sym.pltGotAddress(elf_file),
+                .st_value = @intCast(sym.pltGotAddress(elf_file)),
                 .st_size = 16,
             };
         }
@@ -1212,7 +1183,7 @@ pub const PltGotSection = struct {
                 const target_addr = sym.gotAddress(elf_file);
                 const source_addr = sym.pltGotAddress(elf_file);
                 const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
-                const off = try math.divExact(u12, @truncate(target_addr), 8);
+                const off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
                 const insts = &[_]Instruction{
                     Instruction.adrp(.x16, pages),
                     Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(off)),
@@ -1248,12 +1219,7 @@ pub const CopyRelSection = struct {
         symbol.flags.@"export" = true;
         symbol.flags.has_copy_rel = true;
         symbol.flags.weak = false;
-
-        if (symbol.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.copy_rel = index;
-            symbol.setExtra(new_extra, elf_file);
-        } else try symbol.addExtra(.{ .copy_rel = index }, elf_file);
+        try symbol.addExtra(.{ .copy_rel = index }, elf_file);
         try copy_rel.symbols.append(gpa, sym_index);
 
         const shared_object = symbol.file(elf_file).?.shared_object;
@@ -1280,9 +1246,9 @@ pub const CopyRelSection = struct {
             const symbol = elf_file.symbol(sym_index);
             const shared_object = symbol.file(elf_file).?.shared_object;
             const alignment = try symbol.dsoAlignment(elf_file);
-            symbol.value = mem.alignForward(u64, shdr.sh_size, alignment);
+            symbol.value = @intCast(mem.alignForward(u64, shdr.sh_size, alignment));
             shdr.sh_addralign = @max(shdr.sh_addralign, alignment);
-            shdr.sh_size = symbol.value + symbol.elfSym(elf_file).st_size;
+            shdr.sh_size = @as(u64, @intCast(symbol.value)) + symbol.elfSym(elf_file).st_size;
 
             const aliases = shared_object.symbolAliases(sym_index, elf_file);
             for (aliases) |alias| {
@@ -1303,7 +1269,7 @@ pub const CopyRelSection = struct {
             assert(sym.flags.import and sym.flags.has_copy_rel);
             const extra = sym.extra(elf_file).?;
             elf_file.addRelaDynAssumeCapacity(.{
-                .offset = sym.address(.{}, elf_file),
+                .offset = @intCast(sym.address(.{}, elf_file)),
                 .sym = extra.dynamic,
                 .type = relocation.encode(.copy, cpu_arch),
             });
@@ -1335,11 +1301,7 @@ pub const DynsymSection = struct {
         const index = @as(u32, @intCast(dynsym.entries.items.len + 1));
         const sym = elf_file.symbol(sym_index);
         sym.flags.has_dynamic = true;
-        if (sym.extra(elf_file)) |extra| {
-            var new_extra = extra;
-            new_extra.dynamic = index;
-            sym.setExtra(new_extra, elf_file);
-        } else try sym.addExtra(.{ .dynamic = index }, elf_file);
+        try sym.addExtra(.{ .dynamic = index }, elf_file);
         const off = try elf_file.insertDynString(sym.name(elf_file));
         try dynsym.entries.append(gpa, .{ .symbol_index = sym_index, .off = off });
     }
src/link/Elf/thunks.zig
@@ -7,7 +7,7 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
     assert(atoms.len > 0);
 
     for (atoms) |atom_index| {
-        elf_file.atom(atom_index).?.value = @bitCast(@as(i64, -1));
+        elf_file.atom(atom_index).?.value = -1;
     }
 
     var i: usize = 0;
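
The u64 to i64 switch is what allows the sentinel above to be written as a plain -1; with unsigned values the same marker needed the @bitCast spelling that this hunk deletes. A tiny comparison:

const std = @import("std");

test "unallocated sentinel with signed atom values" {
    // Old representation: value: u64, sentinel spelled via a bitcast.
    const old_sentinel: u64 = @bitCast(@as(i64, -1));
    try std.testing.expectEqual(std.math.maxInt(u64), old_sentinel);

    // New representation: value: i64, sentinel and checks read directly.
    const new_sentinel: i64 = -1;
    try std.testing.expect(new_sentinel == -1);
    try std.testing.expect(new_sentinel < 0);
}
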
@@ -22,7 +22,8 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
             const atom_index = atoms[i];
             const atom = elf_file.atom(atom_index).?;
             assert(atom.flags.alive);
-            if (atom.alignment.forward(shdr.sh_size) - start_atom.value >= max_distance) break;
+            if (@as(i64, @intCast(atom.alignment.forward(shdr.sh_size))) - start_atom.value >= max_distance)
+                break;
             atom.value = try advance(shdr, atom.size, atom.alignment);
         }
 
@@ -50,7 +51,8 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
                 };
                 try thunk.symbols.put(gpa, target, {});
             }
-            atom.thunk_index = thunk_index;
+            try atom.addExtra(.{ .thunk = thunk_index }, elf_file);
+            atom.flags.thunk = true;
         }
 
         thunk.value = try advance(shdr, thunk.size(elf_file), Atom.Alignment.fromNonzeroByteUnits(2));
@@ -59,12 +61,12 @@ pub fn createThunks(shndx: u32, elf_file: *Elf) !void {
     }
 }
 
-fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !u64 {
+fn advance(shdr: *elf.Elf64_Shdr, size: u64, alignment: Atom.Alignment) !i64 {
     const offset = alignment.forward(shdr.sh_size);
     const padding = offset - shdr.sh_size;
     shdr.sh_size += padding + size;
     shdr.sh_addralign = @max(shdr.sh_addralign, alignment.toByteUnits() orelse 1);
-    return offset;
+    return @intCast(offset);
 }
 
 /// A branch will need an extender if its target is larger than
@@ -78,7 +80,7 @@ fn maxAllowedDistance(cpu_arch: std.Target.Cpu.Arch) u32 {
 }
 
 pub const Thunk = struct {
-    value: u64 = 0,
+    value: i64 = 0,
     output_section_index: u32 = 0,
     symbols: std.AutoArrayHashMapUnmanaged(Symbol.Index, void) = .{},
     output_symtab_ctx: Elf.SymtabCtx = .{},
@@ -92,14 +94,14 @@ pub const Thunk = struct {
         return thunk.symbols.keys().len * trampolineSize(cpu_arch);
     }
 
-    pub fn address(thunk: Thunk, elf_file: *Elf) u64 {
+    pub fn address(thunk: Thunk, elf_file: *Elf) i64 {
         const shdr = elf_file.shdrs.items[thunk.output_section_index];
-        return shdr.sh_addr + thunk.value;
+        return @as(i64, @intCast(shdr.sh_addr)) + thunk.value;
     }
 
-    pub fn targetAddress(thunk: Thunk, sym_index: Symbol.Index, elf_file: *Elf) u64 {
+    pub fn targetAddress(thunk: Thunk, sym_index: Symbol.Index, elf_file: *Elf) i64 {
         const cpu_arch = elf_file.getTarget().cpu.arch;
-        return thunk.address(elf_file) + thunk.symbols.getIndex(sym_index).? * trampolineSize(cpu_arch);
+        return thunk.address(elf_file) + @as(i64, @intCast(thunk.symbols.getIndex(sym_index).? * trampolineSize(cpu_arch)));
     }
 
     pub fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
@@ -131,7 +133,7 @@ pub const Thunk = struct {
                 .st_info = elf.STT_FUNC,
                 .st_other = 0,
                 .st_shndx = @intCast(thunk.output_section_index),
-                .st_value = thunk.targetAddress(sym_index, elf_file),
+                .st_value = @intCast(thunk.targetAddress(sym_index, elf_file)),
                 .st_size = trampolineSize(cpu_arch),
             };
         }
@@ -204,9 +206,9 @@ const aarch64 = struct {
         if (target.flags.has_plt) return false;
         if (atom.output_section_index != target.output_section_index) return false;
         const target_atom = target.atom(elf_file).?;
-        if (target_atom.value == @as(u64, @bitCast(@as(i64, -1)))) return false;
-        const saddr = @as(i64, @intCast(atom.address(elf_file) + rel.r_offset));
-        const taddr: i64 = @intCast(target.address(.{}, elf_file));
+        if (target_atom.value == -1) return false;
+        const saddr = atom.address(elf_file) + @as(i64, @intCast(rel.r_offset));
+        const taddr = target.address(.{}, elf_file);
         _ = math.cast(i28, taddr + rel.r_addend - saddr) orelse return false;
         return true;
     }
@@ -214,11 +216,11 @@ const aarch64 = struct {
     fn write(thunk: Thunk, elf_file: *Elf, writer: anytype) !void {
         for (thunk.symbols.keys(), 0..) |sym_index, i| {
             const sym = elf_file.symbol(sym_index);
-            const saddr = thunk.address(elf_file) + i * trampoline_size;
+            const saddr = thunk.address(elf_file) + @as(i64, @intCast(i * trampoline_size));
             const taddr = sym.address(.{}, elf_file);
             const pages = try util.calcNumberOfPages(saddr, taddr);
             try writer.writeInt(u32, Instruction.adrp(.x16, pages).toU32(), .little);
-            const off: u12 = @truncate(taddr);
+            const off: u12 = @truncate(@as(u64, @bitCast(taddr)));
             try writer.writeInt(u32, Instruction.add(.x16, .x16, off, false).toU32(), .little);
             try writer.writeInt(u32, Instruction.br(.x16).toU32(), .little);
         }
src/link/Elf/ZigObject.zig
@@ -343,7 +343,7 @@ pub fn resolveSymbols(self: *ZigObject, elf_file: *Elf) void {
                 atom.outputShndx().?
             else
                 elf.SHN_UNDEF;
-            global.value = esym.st_value;
+            global.value = @intCast(esym.st_value);
             global.atom_index = atom_index;
             global.esym_index = esym_index;
             global.file_index = self.index;
@@ -566,7 +566,7 @@ pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
             else => {},
         }
         local.flags.output_symtab = true;
-        try local.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
+        try local.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
         self.output_symtab_ctx.nlocals += 1;
         self.output_symtab_ctx.strsize += @as(u32, @intCast(local.name(elf_file).len)) + 1;
     }
@@ -578,10 +578,10 @@ pub fn updateSymtabSize(self: *ZigObject, elf_file: *Elf) !void {
         if (global.atom(elf_file)) |atom| if (!atom.flags.alive) continue;
         global.flags.output_symtab = true;
         if (global.isLocal(elf_file)) {
-            try global.setOutputSymtabIndex(self.output_symtab_ctx.nlocals, elf_file);
+            try global.addExtra(.{ .symtab = self.output_symtab_ctx.nlocals }, elf_file);
             self.output_symtab_ctx.nlocals += 1;
         } else {
-            try global.setOutputSymtabIndex(self.output_symtab_ctx.nglobals, elf_file);
+            try global.addExtra(.{ .symtab = self.output_symtab_ctx.nglobals }, elf_file);
             self.output_symtab_ctx.nglobals += 1;
         }
         self.output_symtab_ctx.strsize += @as(u32, @intCast(global.name(elf_file).len)) + 1;
@@ -631,7 +631,7 @@ pub fn codeAlloc(self: ZigObject, elf_file: *Elf, atom_index: Atom.Index) ![]u8
         return code;
     }
 
-    const file_offset = shdr.sh_offset + atom.value;
+    const file_offset = shdr.sh_offset + @as(u64, @intCast(atom.value));
     const size = std.math.cast(usize, atom.size) orelse return error.Overflow;
     const code = try gpa.alloc(u8, size);
     errdefer gpa.free(code);
@@ -659,7 +659,7 @@ pub fn getDeclVAddr(
         .r_info = (@as(u64, @intCast(this_sym.esym_index)) << 32) | r_type,
         .r_addend = reloc_info.addend,
     });
-    return vaddr;
+    return @intCast(vaddr);
 }
 
 pub fn getAnonDeclVAddr(
@@ -678,7 +678,7 @@ pub fn getAnonDeclVAddr(
         .r_info = (@as(u64, @intCast(sym.esym_index)) << 32) | r_type,
         .r_addend = reloc_info.addend,
     });
-    return vaddr;
+    return @intCast(vaddr);
 }
 
 pub fn lowerAnonDecl(
@@ -929,7 +929,7 @@ fn updateDeclCode(
 
     if (old_size > 0 and elf_file.base.child_pid == null) {
         const capacity = atom_ptr.capacity(elf_file);
-        const need_realloc = code.len > capacity or !required_alignment.check(atom_ptr.value);
+        const need_realloc = code.len > capacity or !required_alignment.check(@intCast(atom_ptr.value));
         if (need_realloc) {
             try atom_ptr.grow(elf_file);
             log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom_ptr.value });
@@ -984,7 +984,7 @@ fn updateDeclCode(
 
     const shdr = elf_file.shdrs.items[shdr_index];
     if (shdr.sh_type != elf.SHT_NOBITS) {
-        const file_offset = shdr.sh_offset + atom_ptr.value;
+        const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
         try elf_file.base.file.?.pwriteAll(code, file_offset);
     }
 }
@@ -1107,7 +1107,7 @@ pub fn updateFunc(
         try self.dwarf.?.commitDeclState(
             mod,
             decl_index,
-            sym.address(.{}, elf_file),
+            @intCast(sym.address(.{}, elf_file)),
             sym.atom(elf_file).?.size,
             ds,
         );
@@ -1186,7 +1186,7 @@ pub fn updateDecl(
         try self.dwarf.?.commitDeclState(
             mod,
             decl_index,
-            sym.address(.{}, elf_file),
+            @intCast(sym.address(.{}, elf_file)),
             sym.atom(elf_file).?.size,
             ds,
         );
@@ -1275,7 +1275,7 @@ fn updateLazySymbol(
     }
 
     const shdr = elf_file.shdrs.items[output_section_index];
-    const file_offset = shdr.sh_offset + atom_ptr.value;
+    const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
     try elf_file.base.file.?.pwriteAll(code, file_offset);
 }
 
@@ -1373,7 +1373,7 @@ fn lowerConst(
     local_esym.st_value = 0;
 
     const shdr = elf_file.shdrs.items[output_section_index];
-    const file_offset = shdr.sh_offset + atom_ptr.value;
+    const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
     try elf_file.base.file.?.pwriteAll(code, file_offset);
 
     return .{ .ok = sym_index };
@@ -1457,7 +1457,7 @@ pub fn updateExports(
 
         const actual_esym_index = global_esym_index & symbol_mask;
         const global_esym = &self.global_esyms.items(.elf_sym)[actual_esym_index];
-        global_esym.st_value = elf_file.symbol(sym_index).value;
+        global_esym.st_value = @intCast(elf_file.symbol(sym_index).value);
         global_esym.st_shndx = esym.st_shndx;
         global_esym.st_info = (stb_bits << 4) | stt_bits;
         global_esym.st_name = name_off;
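
getDeclVAddr and getAnonDeclVAddr above pack r_info the way ELF64 defines it: symbol index in the upper 32 bits, relocation type in the lower 32, which is also what the merge pass in Object.zig rewrites when it repoints a relocation at a synthetic subsection symbol. A small pack/unpack sketch using the std.elf accessors:

const std = @import("std");

fn packRInfo(sym: u32, r_type: u32) u64 {
    return (@as(u64, sym) << 32) | r_type;
}

test "ELF64 r_info packing" {
    const r_abs64: u32 = 1; // R_X86_64_64 in the x86-64 psABI
    const rela = std.elf.Elf64_Rela{
        .r_offset = 0x1000,
        .r_info = packRInfo(7, r_abs64),
        .r_addend = -8,
    };
    try std.testing.expectEqual(@as(u32, 7), rela.r_sym());
    try std.testing.expectEqual(r_abs64, rela.r_type());
}
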
src/link/MachO/Atom.zig
@@ -770,7 +770,7 @@ fn resolveRelocInner(
                 };
                 break :target math.cast(u64, target) orelse return error.Overflow;
             };
-            const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(source, target)));
+            const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target))));
             aarch64.writeAdrpInst(pages, code[rel_offset..][0..4]);
         },
 
src/link/MachO/synthetic.zig
@@ -267,7 +267,7 @@ pub const StubsSection = struct {
                 },
                 .aarch64 => {
                     // TODO relax if possible
-                    const pages = try aarch64.calcNumberOfPages(source, target);
+                    const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
                     try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
                     const off = try math.divExact(u12, @truncate(target), 8);
                     try writer.writeInt(
@@ -411,7 +411,7 @@ pub const StubsHelperSection = struct {
             .aarch64 => {
                 {
                     // TODO relax if possible
-                    const pages = try aarch64.calcNumberOfPages(sect.addr, dyld_private_addr);
+                    const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr), @intCast(dyld_private_addr));
                     try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
                     const off: u12 = @truncate(dyld_private_addr);
                     try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
@@ -424,7 +424,7 @@ pub const StubsHelperSection = struct {
                 ).toU32(), .little);
                 {
                     // TODO relax if possible
-                    const pages = try aarch64.calcNumberOfPages(sect.addr + 12, dyld_stub_binder_addr);
+                    const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr + 12), @intCast(dyld_stub_binder_addr));
                     try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
                     const off = try math.divExact(u12, @truncate(dyld_stub_binder_addr), 8);
                     try writer.writeInt(u32, aarch64.Instruction.ldr(
@@ -679,7 +679,7 @@ pub const ObjcStubsSection = struct {
                     {
                         const target = sym.getObjcSelrefsAddress(macho_file);
                         const source = addr;
-                        const pages = try aarch64.calcNumberOfPages(source, target);
+                        const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
                         try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
                         const off = try math.divExact(u12, @truncate(target), 8);
                         try writer.writeInt(
@@ -692,7 +692,7 @@ pub const ObjcStubsSection = struct {
                         const target_sym = macho_file.getSymbol(macho_file.objc_msg_send_index.?);
                         const target = target_sym.getGotAddress(macho_file);
                         const source = addr + 2 * @sizeOf(u32);
-                        const pages = try aarch64.calcNumberOfPages(source, target);
+                        const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
                         try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
                         const off = try math.divExact(u12, @truncate(target), 8);
                         try writer.writeInt(
src/link/MachO/thunks.zig
@@ -99,7 +99,7 @@ pub const Thunk = struct {
             const sym = macho_file.getSymbol(sym_index);
             const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
             const taddr = sym.getAddress(.{}, macho_file);
-            const pages = try aarch64.calcNumberOfPages(saddr, taddr);
+            const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
             try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
             const off: u12 = @truncate(taddr);
             try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
src/link/aarch64.zig
@@ -25,7 +25,7 @@ pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void {
     mem.writeInt(u32, code, inst.toU32(), .little);
 }
 
-pub fn calcNumberOfPages(saddr: u64, taddr: u64) error{Overflow}!i21 {
+pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
     const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
     const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
     const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
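
The new signed signature flows through every aarch64 call site patched above. A worked example of the page delta that feeds ADRP, plus the low 12 bits that the PLT and thunk writers split off for the following ADD/LDR (the function body mirrors the one shown above; the concrete addresses are made up):

const std = @import("std");
const math = std.math;

fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
    const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
    const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
    const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
    return pages;
}

test "adrp page delta and lo12 immediate" {
    const saddr: i64 = 0x10_2f48; // e.g. a PLT entry
    const taddr: i64 = 0x23_4010; // e.g. its .got.plt slot
    const pages = try calcNumberOfPages(saddr, taddr);
    try std.testing.expectEqual(@as(i21, 0x132), pages);

    // The low 12 bits become the ADD/LDR immediate; the @bitCast mirrors the
    // @truncate(@as(u64, @bitCast(target_addr))) pattern used above.
    const lo12: u12 = @truncate(@as(u64, @bitCast(taddr)));
    try std.testing.expectEqual(@as(u12, 0x010), lo12);
}
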
src/link/Elf.zig
@@ -205,10 +205,19 @@ num_ifunc_dynrelocs: usize = 0,
 
 /// List of atoms that are owned directly by the linker.
 atoms: std.ArrayListUnmanaged(Atom) = .{},
+atoms_extra: std.ArrayListUnmanaged(u32) = .{},
 
 /// List of range extension thunks.
 thunks: std.ArrayListUnmanaged(Thunk) = .{},
 
+/// List of output merge sections with deduped contents.
+merge_sections: std.ArrayListUnmanaged(MergeSection) = .{},
+/// List of output merge subsections.
+/// Each subsection is akin to an Atom but belongs to a MergeSection.
+merge_subsections: std.ArrayListUnmanaged(MergeSubsection) = .{},
+/// List of input merge sections as parsed from input relocatables.
+merge_input_sections: std.ArrayListUnmanaged(InputMergeSection) = .{},
+
 /// Table of last atom index in a section and matching atom free list if any.
 last_atom_and_free_list_table: LastAtomAndFreeListTable = .{},
 
@@ -369,6 +378,7 @@ pub fn createEmpty(
     try self.symbols_extra.append(gpa, 0);
     // Allocate atom index 0 to null atom
     try self.atoms.append(gpa, .{});
+    try self.atoms_extra.append(gpa, 0);
     // Append null file at index 0
     try self.files.append(gpa, .null);
     // Append null byte to string tables
@@ -378,6 +388,8 @@ pub fn createEmpty(
     _ = try self.addSection(.{ .name = "" });
     // Append null symbol in output symtab
     try self.symtab.append(gpa, null_sym);
+    // Append null input merge section.
+    try self.merge_input_sections.append(gpa, .{});
 
     if (!is_obj_or_ar) {
         try self.dynstrtab.append(gpa, 0);
@@ -491,7 +503,20 @@ pub fn deinit(self: *Elf) void {
     self.start_stop_indexes.deinit(gpa);
 
     self.atoms.deinit(gpa);
+    self.atoms_extra.deinit(gpa);
+    for (self.thunks.items) |*th| {
+        th.deinit(gpa);
+    }
     self.thunks.deinit(gpa);
+    for (self.merge_sections.items) |*sect| {
+        sect.deinit(gpa);
+    }
+    self.merge_sections.deinit(gpa);
+    self.merge_subsections.deinit(gpa);
+    for (self.merge_input_sections.items) |*sect| {
+        sect.deinit(gpa);
+    }
+    self.merge_input_sections.deinit(gpa);
     for (self.last_atom_and_free_list_table.values()) |*value| {
         value.free_list.deinit(gpa);
     }
@@ -1289,6 +1314,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
     // symbol for potential resolution at load-time.
     self.resolveSymbols();
     self.markEhFrameAtomsDead();
+    try self.resolveMergeSections();
 
     try self.convertCommonSymbols();
     self.markImportsExports();
@@ -1313,7 +1339,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
         else => |e| return e,
     };
 
+    try self.addCommentString();
+    try self.finalizeMergeSections();
     try self.initOutputSections();
+    try self.initMergeSections();
     try self.addLinkerDefinedSymbols();
     self.claimUnresolved();
 
@@ -1332,6 +1361,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
     self.sortDynamicSymtab();
     try self.setHashSections();
     try self.setVersionSymtab();
+    try self.updateMergeSectionSizes();
     try self.updateSectionSizes();
 
     try self.allocatePhdrTable();
@@ -1359,7 +1389,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
             if (shdr.sh_type == elf.SHT_NOBITS) continue;
             const code = try zig_object.codeAlloc(self, atom_index);
             defer gpa.free(code);
-            const file_offset = shdr.sh_offset + atom_ptr.value;
+            const file_offset = shdr.sh_offset + @as(u64, @intCast(atom_ptr.value));
             atom_ptr.resolveRelocsAlloc(self, code) catch |err| switch (err) {
                 error.RelocFailure, error.RelaxFailure => has_reloc_errors = true,
                 error.UnsupportedCpuArch => {
@@ -1377,6 +1407,7 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
     try self.writePhdrTable();
     try self.writeShdrTable();
     try self.writeAtoms();
+    try self.writeMergeSections();
     self.writeSyntheticSections() catch |err| switch (err) {
         error.RelocFailure => return error.FlushFailure,
         error.UnsupportedCpuArch => {
@@ -2946,7 +2977,10 @@ pub fn writeElfHeader(self: *Elf) !void {
     mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian);
     index += 4;
 
-    const e_entry = if (self.entry_index) |entry_index| self.symbol(entry_index).address(.{}, self) else 0;
+    const e_entry = if (self.entry_index) |entry_index|
+        @as(u64, @intCast(self.symbol(entry_index).address(.{}, self)))
+    else
+        0;
     const phdr_table_offset = if (self.phdr_table_index) |phndx| self.phdrs.items[phndx].p_offset else 0;
     switch (self.ptr_width) {
         .p32 => {
@@ -3132,14 +3166,14 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
     if (self.dynamic_section_index) |shndx| {
         const shdr = &self.shdrs.items[shndx];
         const symbol_ptr = self.symbol(self.dynamic_index.?);
-        symbol_ptr.value = shdr.sh_addr;
+        symbol_ptr.value = @intCast(shdr.sh_addr);
         symbol_ptr.output_section_index = shndx;
     }
 
     // __ehdr_start
     {
         const symbol_ptr = self.symbol(self.ehdr_start_index.?);
-        symbol_ptr.value = self.image_base;
+        symbol_ptr.value = @intCast(self.image_base);
         symbol_ptr.output_section_index = 1;
     }
 
@@ -3149,9 +3183,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         const end_sym = self.symbol(self.init_array_end_index.?);
         const shdr = &self.shdrs.items[shndx];
         start_sym.output_section_index = shndx;
-        start_sym.value = shdr.sh_addr;
+        start_sym.value = @intCast(shdr.sh_addr);
         end_sym.output_section_index = shndx;
-        end_sym.value = shdr.sh_addr + shdr.sh_size;
+        end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
     }
 
     // __fini_array_start, __fini_array_end
@@ -3160,9 +3194,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         const end_sym = self.symbol(self.fini_array_end_index.?);
         const shdr = &self.shdrs.items[shndx];
         start_sym.output_section_index = shndx;
-        start_sym.value = shdr.sh_addr;
+        start_sym.value = @intCast(shdr.sh_addr);
         end_sym.output_section_index = shndx;
-        end_sym.value = shdr.sh_addr + shdr.sh_size;
+        end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
     }
 
     // __preinit_array_start, __preinit_array_end
@@ -3171,9 +3205,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         const end_sym = self.symbol(self.preinit_array_end_index.?);
         const shdr = &self.shdrs.items[shndx];
         start_sym.output_section_index = shndx;
-        start_sym.value = shdr.sh_addr;
+        start_sym.value = @intCast(shdr.sh_addr);
         end_sym.output_section_index = shndx;
-        end_sym.value = shdr.sh_addr + shdr.sh_size;
+        end_sym.value = @intCast(shdr.sh_addr + shdr.sh_size);
     }
 
     // _GLOBAL_OFFSET_TABLE_
@@ -3181,14 +3215,14 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         if (self.got_plt_section_index) |shndx| {
             const shdr = self.shdrs.items[shndx];
             const sym = self.symbol(self.got_index.?);
-            sym.value = shdr.sh_addr;
+            sym.value = @intCast(shdr.sh_addr);
             sym.output_section_index = shndx;
         }
     } else {
         if (self.got_section_index) |shndx| {
             const shdr = self.shdrs.items[shndx];
             const sym = self.symbol(self.got_index.?);
-            sym.value = shdr.sh_addr;
+            sym.value = @intCast(shdr.sh_addr);
             sym.output_section_index = shndx;
         }
     }
@@ -3197,7 +3231,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
     if (self.plt_section_index) |shndx| {
         const shdr = &self.shdrs.items[shndx];
         const symbol_ptr = self.symbol(self.plt_index.?);
-        symbol_ptr.value = shdr.sh_addr;
+        symbol_ptr.value = @intCast(shdr.sh_addr);
         symbol_ptr.output_section_index = shndx;
     }
 
@@ -3205,7 +3239,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
     if (self.dso_handle_index) |index| {
         const shdr = &self.shdrs.items[1];
         const symbol_ptr = self.symbol(index);
-        symbol_ptr.value = shdr.sh_addr;
+        symbol_ptr.value = @intCast(shdr.sh_addr);
         symbol_ptr.output_section_index = 0;
     }
 
@@ -3213,7 +3247,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
     if (self.eh_frame_hdr_section_index) |shndx| {
         const shdr = &self.shdrs.items[shndx];
         const symbol_ptr = self.symbol(self.gnu_eh_frame_hdr_index.?);
-        symbol_ptr.value = shdr.sh_addr;
+        symbol_ptr.value = @intCast(shdr.sh_addr);
         symbol_ptr.output_section_index = shndx;
     }
 
@@ -3225,9 +3259,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         const start_addr = end_addr - self.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
         const start_sym = self.symbol(self.rela_iplt_start_index.?);
         const end_sym = self.symbol(self.rela_iplt_end_index.?);
-        start_sym.value = start_addr;
+        start_sym.value = @intCast(start_addr);
         start_sym.output_section_index = shndx;
-        end_sym.value = end_addr;
+        end_sym.value = @intCast(end_addr);
         end_sym.output_section_index = shndx;
     }
 
@@ -3236,7 +3270,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         const end_symbol = self.symbol(self.end_index.?);
         for (self.shdrs.items, 0..) |shdr, shndx| {
             if (shdr.sh_flags & elf.SHF_ALLOC != 0) {
-                end_symbol.value = shdr.sh_addr + shdr.sh_size;
+                end_symbol.value = @intCast(shdr.sh_addr + shdr.sh_size);
                 end_symbol.output_section_index = @intCast(shndx);
             }
         }
@@ -3251,9 +3285,9 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
             const stop = self.symbol(self.start_stop_indexes.items[index + 1]);
             const shndx = self.sectionByName(name["__start_".len..]).?;
             const shdr = &self.shdrs.items[shndx];
-            start.value = shdr.sh_addr;
+            start.value = @intCast(shdr.sh_addr);
             start.output_section_index = shndx;
-            stop.value = shdr.sh_addr + shdr.sh_size;
+            stop.value = @intCast(shdr.sh_addr + shdr.sh_size);
             stop.output_section_index = shndx;
         }
     }
@@ -3263,7 +3297,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
         const sym = self.symbol(index);
         if (self.sectionByName(".sdata")) |shndx| {
             const shdr = self.shdrs.items[shndx];
-            sym.value = shdr.sh_addr + 0x800;
+            sym.value = @intCast(shdr.sh_addr + 0x800);
             sym.output_section_index = shndx;
         } else {
             sym.value = 0;
@@ -3293,12 +3327,122 @@ fn checkDuplicates(self: *Elf) !void {
     try self.reportDuplicates(dupes);
 }
 
+pub fn addCommentString(self: *Elf) !void {
+    const msec_index = try self.getOrCreateMergeSection(".comment", elf.SHF_MERGE | elf.SHF_STRINGS, elf.SHT_PROGBITS);
+    const msec = self.mergeSection(msec_index);
+    const res = try msec.insertZ(self.base.comp.gpa, "zig " ++ builtin.zig_version_string);
+    if (res.found_existing) return;
+    const msub_index = try self.addMergeSubsection();
+    const msub = self.mergeSubsection(msub_index);
+    msub.merge_section_index = msec_index;
+    msub.string_index = res.key.pos;
+    msub.alignment = .@"1";
+    msub.size = res.key.len;
+    msub.entsize = 1;
+    msub.alive = true;
+    res.sub.* = msub_index;
+}
+
+pub fn resolveMergeSections(self: *Elf) !void {
+    const tracy = trace(@src());
+    defer tracy.end();
+
+    var has_errors = false;
+    for (self.objects.items) |index| {
+        const file_ptr = self.file(index).?;
+        if (!file_ptr.isAlive()) continue;
+        file_ptr.object.initMergeSections(self) catch |err| switch (err) {
+            error.MalformedObject => has_errors = true,
+            else => |e| return e,
+        };
+    }
+
+    if (has_errors) return error.FlushFailure;
+
+    for (self.objects.items) |index| {
+        const file_ptr = self.file(index).?;
+        if (!file_ptr.isAlive()) continue;
+        file_ptr.object.resolveMergeSubsections(self) catch |err| switch (err) {
+            error.MalformedObject => has_errors = true,
+            else => |e| return e,
+        };
+    }
+
+    if (has_errors) return error.FlushFailure;
+}
+
+pub fn finalizeMergeSections(self: *Elf) !void {
+    for (self.merge_sections.items) |*msec| {
+        try msec.finalize(self);
+    }
+}
+
+pub fn updateMergeSectionSizes(self: *Elf) !void {
+    for (self.merge_sections.items) |*msec| {
+        const shdr = &self.shdrs.items[msec.output_section_index];
+        for (msec.subsections.items) |msub_index| {
+            const msub = self.mergeSubsection(msub_index);
+            assert(msub.alive);
+            const offset = msub.alignment.forward(shdr.sh_size);
+            const padding = offset - shdr.sh_size;
+            msub.value = @intCast(offset);
+            shdr.sh_size += padding + msub.size;
+            shdr.sh_addralign = @max(shdr.sh_addralign, msub.alignment.toByteUnits() orelse 1);
+        }
+    }
+}
+
+pub fn writeMergeSections(self: *Elf) !void {
+    const gpa = self.base.comp.gpa;
+    var buffer = std.ArrayList(u8).init(gpa);
+    defer buffer.deinit();
+
+    for (self.merge_sections.items) |msec| {
+        const shdr = self.shdrs.items[msec.output_section_index];
+        const size = math.cast(usize, shdr.sh_size) orelse return error.Overflow;
+        try buffer.ensureTotalCapacity(size);
+        buffer.appendNTimesAssumeCapacity(0, size);
+
+        for (msec.subsections.items) |msub_index| {
+            const msub = self.mergeSubsection(msub_index);
+            assert(msub.alive);
+            const string = msub.getString(self);
+            const off = math.cast(usize, msub.value) orelse return error.Overflow;
+            @memcpy(buffer.items[off..][0..string.len], string);
+        }
+
+        try self.base.file.?.pwriteAll(buffer.items, shdr.sh_offset);
+        buffer.clearRetainingCapacity();
+    }
+}
+
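
Note: updateMergeSectionSizes lays out each live subsection by forward-aligning the running sh_size, charging the padding to the section, and tracking the maximum alignment; writeMergeSections then copies every string to its assigned offset in a zero-filled buffer, so the padding comes out as NUL bytes on disk. A hand-checked sketch of the offset arithmetic (values are made up):

    const std = @import("std");

    // Same bookkeeping as updateMergeSectionSizes, reduced to one helper:
    // forward-align the running size, return the offset, grow by the size.
    fn place(sh_size: *u64, sub_size: u64, alignment: u64) u64 {
        const offset = std.mem.alignForward(u64, sh_size.*, alignment);
        sh_size.* = offset + sub_size;
        return offset;
    }

    test "merge subsection layout" {
        var sh_size: u64 = 0;
        try std.testing.expectEqual(@as(u64, 0), place(&sh_size, 3, 1));
        try std.testing.expectEqual(@as(u64, 4), place(&sh_size, 12, 4)); // one byte of padding
        try std.testing.expectEqual(@as(u64, 16), place(&sh_size, 5, 2));
        try std.testing.expectEqual(@as(u64, 21), sh_size); // sh_addralign would end up as 4
    }
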
 fn initOutputSections(self: *Elf) !void {
     for (self.objects.items) |index| {
         try self.file(index).?.object.initOutputSections(self);
     }
 }
 
+pub fn initMergeSections(self: *Elf) !void {
+    for (self.merge_sections.items) |*msec| {
+        if (msec.subsections.items.len == 0) continue;
+        const name = msec.name(self);
+        const shndx = self.sectionByName(name) orelse try self.addSection(.{
+            .name = name,
+            .type = msec.type,
+            .flags = msec.flags,
+        });
+        msec.output_section_index = shndx;
+
+        var entsize = self.mergeSubsection(msec.subsections.items[0]).entsize;
+        for (msec.subsections.items) |index| {
+            const msub = self.mergeSubsection(index);
+            entsize = @min(entsize, msub.entsize);
+        }
+        const shdr = &self.shdrs.items[shndx];
+        shdr.sh_entsize = entsize;
+    }
+}
+
 fn initSyntheticSections(self: *Elf) !void {
     const comp = self.base.comp;
     const target = comp.root_mod.resolved_target.result;
@@ -3965,6 +4109,10 @@ fn resetShdrIndexes(self: *Elf, backlinks: []const u32) !void {
         }
     }
 
+    for (self.merge_sections.items) |*msec| {
+        msec.output_section_index = backlinks[msec.output_section_index];
+    }
+
     {
         var output_rela_sections = try self.output_rela_sections.clone(gpa);
         defer output_rela_sections.deinit(gpa);
@@ -4052,7 +4200,7 @@ fn updateSectionSizes(self: *Elf) !void {
             if (!atom_ptr.flags.alive) continue;
             const offset = atom_ptr.alignment.forward(shdr.sh_size);
             const padding = offset - shdr.sh_size;
-            atom_ptr.value = offset;
+            atom_ptr.value = @intCast(offset);
             shdr.sh_size += padding + atom_ptr.size;
             shdr.sh_addralign = @max(shdr.sh_addralign, atom_ptr.alignment.toByteUnits() orelse 1);
         }
@@ -4535,7 +4683,7 @@ fn writeAtoms(self: *Elf) !void {
             const atom_ptr = self.atom(atom_index).?;
             assert(atom_ptr.flags.alive);
 
-            const offset = math.cast(usize, atom_ptr.value - base_offset) orelse
+            const offset = math.cast(usize, atom_ptr.value - @as(i64, @intCast(base_offset))) orelse
                 return error.Overflow;
             const size = math.cast(usize, atom_ptr.size) orelse return error.Overflow;
 
@@ -4576,7 +4724,7 @@ fn writeAtoms(self: *Elf) !void {
             const thunk_size = th.size(self);
             try buffer.ensureUnusedCapacity(thunk_size);
             const shdr = self.shdrs.items[th.output_section_index];
-            const offset = th.value + shdr.sh_offset;
+            const offset = @as(u64, @intCast(th.value)) + shdr.sh_offset;
             try th.write(self, buffer.writer());
             assert(buffer.items.len == thunk_size);
             try self.base.file.?.pwriteAll(buffer.items, offset);
@@ -4611,6 +4759,7 @@ pub fn updateSymtabSize(self: *Elf) !void {
     if (self.eh_frame_section_index) |_| {
         nlocals += 1;
     }
+    nlocals += @intCast(self.merge_sections.items.len);
 
     if (self.requiresThunks()) for (self.thunks.items) |*th| {
         th.output_symtab_ctx.ilocal = nlocals + 1;
@@ -4947,12 +5096,30 @@ fn writeSectionSymbols(self: *Elf) void {
         };
         ilocal += 1;
     }
+
+    for (self.merge_sections.items) |msec| {
+        const shdr = self.shdrs.items[msec.output_section_index];
+        const out_sym = &self.symtab.items[ilocal];
+        out_sym.* = .{
+            .st_name = 0,
+            .st_value = shdr.sh_addr,
+            .st_info = elf.STT_SECTION,
+            .st_shndx = @intCast(msec.output_section_index),
+            .st_size = 0,
+            .st_other = 0,
+        };
+        ilocal += 1;
+    }
 }
 
 pub fn sectionSymbolOutputSymtabIndex(self: Elf, shndx: u32) u32 {
     if (self.eh_frame_section_index) |index| {
         if (index == shndx) return @intCast(self.output_sections.keys().len + 1);
     }
+    const base: usize = if (self.eh_frame_section_index == null) 0 else 1;
+    for (self.merge_sections.items, 0..) |msec, index| {
+        if (msec.output_section_index == shndx) return @intCast(self.output_sections.keys().len + 1 + index + base);
+    }
     return @intCast(self.output_sections.getIndex(shndx).? + 1);
 }
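
Note: as the additions above suggest, the local STT_SECTION symbols are emitted in a fixed order: one per regular output section, then one for .eh_frame if present, then one per merge section; sectionSymbolOutputSymtabIndex reproduces that order (indices are 1-based because of the null symbol). A worked example with made-up sections:

    output sections .text, .data        -> symtab indices 1 and 2
    .eh_frame present                   -> index 3 (keys().len + 1)
    merge sections .rodata.str (idx 0),
                   .comment    (idx 1)  -> indices 4 and 5 (keys().len + 1 + idx + base, base = 1)
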
 
@@ -5458,6 +5625,50 @@ pub fn addAtom(self: *Elf) !Atom.Index {
     return index;
 }
 
+pub fn addAtomExtra(self: *Elf, extra: Atom.Extra) !u32 {
+    const fields = @typeInfo(Atom.Extra).Struct.fields;
+    try self.atoms_extra.ensureUnusedCapacity(self.base.comp.gpa, fields.len);
+    return self.addAtomExtraAssumeCapacity(extra);
+}
+
+pub fn addAtomExtraAssumeCapacity(self: *Elf, extra: Atom.Extra) u32 {
+    const index = @as(u32, @intCast(self.atoms_extra.items.len));
+    const fields = @typeInfo(Atom.Extra).Struct.fields;
+    inline for (fields) |field| {
+        self.atoms_extra.appendAssumeCapacity(switch (field.type) {
+            u32 => @field(extra, field.name),
+            else => @compileError("bad field type"),
+        });
+    }
+    return index;
+}
+
+pub fn atomExtra(self: *Elf, index: u32) ?Atom.Extra {
+    if (index == 0) return null;
+    const fields = @typeInfo(Atom.Extra).Struct.fields;
+    var i: usize = index;
+    var result: Atom.Extra = undefined;
+    inline for (fields) |field| {
+        @field(result, field.name) = switch (field.type) {
+            u32 => self.atoms_extra.items[i],
+            else => @compileError("bad field type"),
+        };
+        i += 1;
+    }
+    return result;
+}
+
+pub fn setAtomExtra(self: *Elf, index: u32, extra: Atom.Extra) void {
+    assert(index > 0);
+    const fields = @typeInfo(Atom.Extra).Struct.fields;
+    inline for (fields, 0..) |field, i| {
+        self.atoms_extra.items[index + i] = switch (field.type) {
+            u32 => @field(extra, field.name),
+            else => @compileError("bad field type"),
+        };
+    }
+}
+
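
Note: addAtomExtra, atomExtra and setAtomExtra flatten a struct of u32 fields into the shared atoms_extra array; index 0 is the null entry appended in createEmpty, which is why atomExtra(0) returns null. A self-contained sketch of the same round trip with a hypothetical two-field struct (Atom.Extra's real fields are defined in Atom.zig and are not shown in this hunk):

    const std = @import("std");

    // Generic version of the encode/decode pair above: any struct whose fields
    // are all u32 is stored as consecutive words, addressed by its first index.
    fn ExtraCodec(comptime T: type) type {
        return struct {
            fn append(list: *std.ArrayListUnmanaged(u32), gpa: std.mem.Allocator, extra: T) !u32 {
                const index: u32 = @intCast(list.items.len);
                inline for (@typeInfo(T).Struct.fields) |field| {
                    try list.append(gpa, @field(extra, field.name));
                }
                return index;
            }

            fn get(items: []const u32, index: u32) T {
                var result: T = undefined;
                inline for (@typeInfo(T).Struct.fields, 0..) |field, i| {
                    @field(result, field.name) = items[index + i];
                }
                return result;
            }
        };
    }

    test "extras round trip" {
        const Extra = struct { a: u32, b: u32 }; // stand-in for Atom.Extra
        const gpa = std.testing.allocator;
        var store: std.ArrayListUnmanaged(u32) = .{};
        defer store.deinit(gpa);
        try store.append(gpa, 0); // reserved null entry, as in createEmpty
        const idx = try ExtraCodec(Extra).append(&store, gpa, .{ .a = 7, .b = 42 });
        const back = ExtraCodec(Extra).get(store.items, idx);
        try std.testing.expectEqual(@as(u32, 7), back.a);
        try std.testing.expectEqual(@as(u32, 42), back.b);
    }
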
 pub fn addThunk(self: *Elf) !Thunk.Index {
     const index = @as(Thunk.Index, @intCast(self.thunks.items.len));
     const th = try self.thunks.addOne(self.base.comp.gpa);
@@ -5637,35 +5848,88 @@ pub fn comdatGroupOwner(self: *Elf, index: ComdatGroupOwner.Index) *ComdatGroupO
     return &self.comdat_groups_owners.items[index];
 }
 
-pub fn gotAddress(self: *Elf) u64 {
+pub fn addInputMergeSection(self: *Elf) !InputMergeSection.Index {
+    const index: InputMergeSection.Index = @intCast(self.merge_input_sections.items.len);
+    const msec = try self.merge_input_sections.addOne(self.base.comp.gpa);
+    msec.* = .{};
+    return index;
+}
+
+pub fn inputMergeSection(self: *Elf, index: InputMergeSection.Index) ?*InputMergeSection {
+    if (index == 0) return null;
+    return &self.merge_input_sections.items[index];
+}
+
+pub fn addMergeSubsection(self: *Elf) !MergeSubsection.Index {
+    const index: MergeSubsection.Index = @intCast(self.merge_subsections.items.len);
+    const msec = try self.merge_subsections.addOne(self.base.comp.gpa);
+    msec.* = .{};
+    return index;
+}
+
+pub fn mergeSubsection(self: *Elf, index: MergeSubsection.Index) *MergeSubsection {
+    assert(index < self.merge_subsections.items.len);
+    return &self.merge_subsections.items[index];
+}
+
+pub fn getOrCreateMergeSection(self: *Elf, name: []const u8, flags: u64, @"type": u32) !MergeSection.Index {
+    const gpa = self.base.comp.gpa;
+    const out_name = name: {
+        if (self.base.isRelocatable()) break :name name;
+        if (mem.eql(u8, name, ".rodata") or mem.startsWith(u8, name, ".rodata."))
+            break :name if (flags & elf.SHF_STRINGS != 0) ".rodata.str" else ".rodata.cst";
+        break :name name;
+    };
+    const out_off = try self.strings.insert(gpa, out_name);
+    const out_flags = flags & ~@as(u64, elf.SHF_COMPRESSED | elf.SHF_GROUP);
+    for (self.merge_sections.items, 0..) |msec, index| {
+        if (msec.name_offset == out_off) return @intCast(index);
+    }
+    const index = @as(MergeSection.Index, @intCast(self.merge_sections.items.len));
+    const msec = try self.merge_sections.addOne(gpa);
+    msec.* = .{
+        .name_offset = out_off,
+        .flags = out_flags,
+        .type = @"type",
+    };
+    return index;
+}
+
+pub fn mergeSection(self: *Elf, index: MergeSection.Index) *MergeSection {
+    assert(index < self.merge_sections.items.len);
+    return &self.merge_sections.items[index];
+}
+
+pub fn gotAddress(self: *Elf) i64 {
     const shndx = blk: {
         if (self.getTarget().cpu.arch == .x86_64 and self.got_plt_section_index != null)
             break :blk self.got_plt_section_index.?;
         break :blk if (self.got_section_index) |shndx| shndx else null;
     };
-    return if (shndx) |index| self.shdrs.items[index].sh_addr else 0;
+    return if (shndx) |index| @intCast(self.shdrs.items[index].sh_addr) else 0;
 }
 
-pub fn tpAddress(self: *Elf) u64 {
+pub fn tpAddress(self: *Elf) i64 {
     const index = self.phdr_tls_index orelse return 0;
     const phdr = self.phdrs.items[index];
-    return switch (self.getTarget().cpu.arch) {
+    const addr = switch (self.getTarget().cpu.arch) {
         .x86_64 => mem.alignForward(u64, phdr.p_vaddr + phdr.p_memsz, phdr.p_align),
         .aarch64 => mem.alignBackward(u64, phdr.p_vaddr - 16, phdr.p_align),
         else => @panic("TODO implement getTpAddress for this arch"),
     };
+    return @intCast(addr);
 }
 
-pub fn dtpAddress(self: *Elf) u64 {
+pub fn dtpAddress(self: *Elf) i64 {
     const index = self.phdr_tls_index orelse return 0;
     const phdr = self.phdrs.items[index];
-    return phdr.p_vaddr;
+    return @intCast(phdr.p_vaddr);
 }
 
-pub fn tlsAddress(self: *Elf) u64 {
+pub fn tlsAddress(self: *Elf) i64 {
     const index = self.phdr_tls_index orelse return 0;
     const phdr = self.phdrs.items[index];
-    return phdr.p_vaddr;
+    return @intCast(phdr.p_vaddr);
 }
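
Note: these address helpers feed relocation arithmetic, which is why they now return i64, matching the signed values assigned above. For the thread pointer the code follows the per-arch layout: on x86_64 the TP sits just past the TLS segment, aligned up to p_align (TLS variables are then reached at negative offsets from it); on aarch64 it is aligned down from 16 bytes below p_vaddr. A hand-checked example of the x86_64 rule with illustrative values:

    const std = @import("std");

    test "x86_64 thread pointer placement" {
        // Illustrative program header values, not taken from the commit.
        const p_vaddr: u64 = 0x1000;
        const p_memsz: u64 = 0x30;
        const p_align: u64 = 0x40;
        const tp = std.mem.alignForward(u64, p_vaddr + p_memsz, p_align);
        try std.testing.expectEqual(@as(u64, 0x1040), tp);
    }
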
 
 const ErrorWithNotes = struct {
@@ -6043,6 +6307,11 @@ fn fmtDumpState(
         try writer.print("  shdr({d}) : COMDAT({d})\n", .{ cg.shndx, cg.cg_index });
     }
 
+    try writer.writeAll("\nOutput merge sections\n");
+    for (self.merge_sections.items) |msec| {
+        try writer.print("  shdr({d}) : {}\n", .{ msec.output_section_index, msec.fmt(self) });
+    }
+
     try writer.writeAll("\nOutput shdrs\n");
     for (self.shdrs.items, 0..) |shdr, shndx| {
         try writer.print("  shdr({d}) : phdr({?d}) : {}\n", .{
@@ -6235,6 +6504,7 @@ const gc = @import("Elf/gc.zig");
 const glibc = @import("../glibc.zig");
 const link = @import("../link.zig");
 const lldMain = @import("../main.zig").lldMain;
+const merge_section = @import("Elf/merge_section.zig");
 const musl = @import("../musl.zig");
 const relocatable = @import("Elf/relocatable.zig");
 const relocation = @import("Elf/relocation.zig");
@@ -6260,10 +6530,13 @@ const GnuHashSection = synthetic_sections.GnuHashSection;
 const GotSection = synthetic_sections.GotSection;
 const GotPltSection = synthetic_sections.GotPltSection;
 const HashSection = synthetic_sections.HashSection;
+const InputMergeSection = merge_section.InputMergeSection;
 const LdScript = @import("Elf/LdScript.zig");
 const LinkerDefined = @import("Elf/LinkerDefined.zig");
 const Liveness = @import("../Liveness.zig");
 const LlvmObject = @import("../codegen/llvm.zig").Object;
+const MergeSection = merge_section.MergeSection;
+const MergeSubsection = merge_section.MergeSubsection;
 const Module = @import("../Module.zig");
 const Object = @import("Elf/Object.zig");
 const InternPool = @import("../InternPool.zig");
test/link/elf.zig
@@ -61,6 +61,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
         elf_step.dependOn(testAbsSymbols(b, .{ .target = musl_target }));
         elf_step.dependOn(testCommonSymbols(b, .{ .target = musl_target }));
         elf_step.dependOn(testCommonSymbolsInArchive(b, .{ .target = musl_target }));
+        elf_step.dependOn(testCommentString(b, .{ .target = musl_target }));
         elf_step.dependOn(testEmptyObject(b, .{ .target = musl_target }));
         elf_step.dependOn(testEntryPoint(b, .{ .target = musl_target }));
         elf_step.dependOn(testGcSections(b, .{ .target = musl_target }));
@@ -72,6 +73,8 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
         elf_step.dependOn(testLinkingC(b, .{ .target = musl_target }));
         elf_step.dependOn(testLinkingCpp(b, .{ .target = musl_target }));
         elf_step.dependOn(testLinkingZig(b, .{ .target = musl_target }));
+        elf_step.dependOn(testMergeStrings(b, .{ .target = musl_target }));
+        elf_step.dependOn(testMergeStrings2(b, .{ .target = musl_target }));
         // https://github.com/ziglang/zig/issues/17451
         // elf_step.dependOn(testNoEhFrameHdr(b, .{ .target = musl_target }));
         elf_step.dependOn(testTlsStatic(b, .{ .target = musl_target }));
@@ -81,6 +84,7 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
         elf_step.dependOn(testAsNeeded(b, .{ .target = gnu_target }));
         // https://github.com/ziglang/zig/issues/17430
         // elf_step.dependOn(testCanonicalPlt(b, .{ .target = gnu_target }));
+        elf_step.dependOn(testCommentString(b, .{ .target = gnu_target }));
         elf_step.dependOn(testCopyrel(b, .{ .target = gnu_target }));
         // https://github.com/ziglang/zig/issues/17430
         // elf_step.dependOn(testCopyrelAlias(b, .{ .target = gnu_target }));
@@ -152,6 +156,8 @@ pub fn testAll(b: *Build, build_opts: BuildOptions) *Step {
     elf_step.dependOn(testThunks(b, .{ .target = aarch64_musl }));
 
     // x86_64 self-hosted backend
+    elf_step.dependOn(testCommentString(b, .{ .use_llvm = false, .target = default_target }));
+    elf_step.dependOn(testCommentStringStaticLib(b, .{ .use_llvm = false, .target = default_target }));
     elf_step.dependOn(testEmitRelocatable(b, .{ .use_llvm = false, .target = x86_64_musl }));
     elf_step.dependOn(testEmitStaticLibZig(b, .{ .use_llvm = false, .target = x86_64_musl }));
     elf_step.dependOn(testGcSectionsZig(b, .{ .use_llvm = false, .target = default_target }));
@@ -362,6 +368,36 @@ fn testCanonicalPlt(b: *Build, opts: Options) *Step {
     return test_step;
 }
 
+fn testCommentString(b: *Build, opts: Options) *Step {
+    const test_step = addTestStep(b, "comment-string", opts);
+
+    const exe = addExecutable(b, opts, .{ .name = "main", .zig_source_bytes = 
+    \\pub fn main() void {}
+    });
+
+    const check = exe.checkObject();
+    check.dumpSection(".comment");
+    check.checkContains("zig");
+    test_step.dependOn(&check.step);
+
+    return test_step;
+}
+
+fn testCommentStringStaticLib(b: *Build, opts: Options) *Step {
+    const test_step = addTestStep(b, "comment-string-static-lib", opts);
+
+    const lib = addStaticLibrary(b, opts, .{ .name = "lib", .zig_source_bytes = 
+    \\export fn foo() void {}
+    });
+
+    const check = lib.checkObject();
+    check.dumpSection(".comment");
+    check.checkContains("zig");
+    test_step.dependOn(&check.step);
+
+    return test_step;
+}
+
 fn testCommonSymbols(b: *Build, opts: Options) *Step {
     const test_step = addTestStep(b, "common-symbols", opts);
 
@@ -2267,6 +2303,125 @@ fn testLinkingZig(b: *Build, opts: Options) *Step {
     return test_step;
 }
 
+// Adapted from https://github.com/rui314/mold/blob/main/test/elf/mergeable-strings.sh
+fn testMergeStrings(b: *Build, opts: Options) *Step {
+    const test_step = addTestStep(b, "merge-strings", opts);
+
+    const obj1 = addObject(b, opts, .{ .name = "a.o" });
+    addCSourceBytes(obj1,
+        \\#include <uchar.h>
+        \\#include <wchar.h>
+        \\char *cstr1 = "foo";
+        \\wchar_t *wide1 = L"foo";
+        \\char16_t *utf16_1 = u"foo";
+        \\char32_t *utf32_1 = U"foo";
+    , &.{"-O2"});
+    obj1.linkLibC();
+
+    const obj2 = addObject(b, opts, .{ .name = "b.o" });
+    addCSourceBytes(obj2,
+        \\#include <stdio.h>
+        \\#include <assert.h>
+        \\#include <uchar.h>
+        \\#include <wchar.h>
+        \\extern char *cstr1;
+        \\extern wchar_t *wide1;
+        \\extern char16_t *utf16_1;
+        \\extern char32_t *utf32_1;
+        \\char *cstr2 = "foo";
+        \\wchar_t *wide2 = L"foo";
+        \\char16_t *utf16_2 = u"foo";
+        \\char32_t *utf32_2 = U"foo";
+        \\int main() {
+        \\ printf("%p %p %p %p %p %p %p %p\n",
+        \\ cstr1, cstr2, wide1, wide2, utf16_1, utf16_2, utf32_1, utf32_2);
+        \\  assert((void*)cstr1 ==   (void*)cstr2);
+        \\  assert((void*)wide1 ==   (void*)wide2);
+        \\  assert((void*)utf16_1 == (void*)utf16_2);
+        \\  assert((void*)utf32_1 == (void*)utf32_2);
+        \\  assert((void*)wide1 ==   (void*)utf32_1);
+        \\  assert((void*)cstr1 !=   (void*)wide1);
+        \\  assert((void*)cstr1 !=   (void*)utf32_1);
+        \\  assert((void*)wide1 !=   (void*)utf16_1);
+        \\}
+    , &.{"-O2"});
+    obj2.linkLibC();
+
+    const exe = addExecutable(b, opts, .{ .name = "main" });
+    exe.addObject(obj1);
+    exe.addObject(obj2);
+    exe.linkLibC();
+
+    const run = addRunArtifact(exe);
+    run.expectExitCode(0);
+    test_step.dependOn(&run.step);
+
+    return test_step;
+}
+
+fn testMergeStrings2(b: *Build, opts: Options) *Step {
+    const test_step = addTestStep(b, "merge-strings2", opts);
+
+    const obj1 = addObject(b, opts, .{ .name = "a", .zig_source_bytes = 
+    \\const std = @import("std");
+    \\export fn foo() void {
+    \\    var arr: [5:0]u16 = [_:0]u16{ 1, 2, 3, 4, 5 };
+    \\    const slice = std.mem.sliceTo(&arr, 3);
+    \\    std.testing.expectEqualSlices(u16, arr[0..2], slice) catch unreachable;
+    \\}
+    });
+
+    const obj2 = addObject(b, opts, .{ .name = "b", .zig_source_bytes = 
+    \\const std = @import("std");
+    \\extern fn foo() void;
+    \\pub fn main() void {
+    \\    foo();
+    \\    var arr: [5:0]u16 = [_:0]u16{ 5, 4, 3, 2, 1 };
+    \\    const slice = std.mem.sliceTo(&arr, 3);
+    \\    std.testing.expectEqualSlices(u16, arr[0..2], slice) catch unreachable;
+    \\}
+    });
+
+    {
+        const exe = addExecutable(b, opts, .{ .name = "main1" });
+        exe.addObject(obj1);
+        exe.addObject(obj2);
+
+        const run = addRunArtifact(exe);
+        run.expectExitCode(0);
+        test_step.dependOn(&run.step);
+
+        const check = exe.checkObject();
+        check.dumpSection(".rodata.str");
+        check.checkContains("\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x00\x00");
+        check.dumpSection(".rodata.str");
+        check.checkContains("\x05\x00\x04\x00\x03\x00\x02\x00\x01\x00\x00\x00");
+        test_step.dependOn(&check.step);
+    }
+
+    {
+        const obj3 = addObject(b, opts, .{ .name = "c" });
+        obj3.addObject(obj1);
+        obj3.addObject(obj2);
+
+        const exe = addExecutable(b, opts, .{ .name = "main2" });
+        exe.addObject(obj3);
+
+        const run = addRunArtifact(exe);
+        run.expectExitCode(0);
+        test_step.dependOn(&run.step);
+
+        const check = exe.checkObject();
+        check.dumpSection(".rodata.str");
+        check.checkContains("\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x00\x00");
+        check.dumpSection(".rodata.str");
+        check.checkContains("\x05\x00\x04\x00\x03\x00\x02\x00\x01\x00\x00\x00");
+        test_step.dependOn(&check.step);
+    }
+
+    return test_step;
+}
+
 fn testNoEhFrameHdr(b: *Build, opts: Options) *Step {
     const test_step = addTestStep(b, "no-eh-frame-hdr", opts);
 
@@ -2528,6 +2683,33 @@ fn testRelocatableEhFrame(b: *Build, opts: Options) *Step {
     return test_step;
 }
 
+// Adapted from https://github.com/rui314/mold/blob/main/test/elf/relocatable-mergeable-sections.sh
+fn testRelocatableMergeStrings(b: *Build, opts: Options) *Step {
+    const test_step = addTestStep(b, "relocatable-merge-strings", opts);
+
+    const obj1 = addObject(b, opts, .{ .name = "a", .asm_source_bytes = 
+    \\.section .rodata.str1.1,"aMS",@progbits,1
+    \\val1:
+    \\.ascii "Hello \0"
+    \\.section .rodata.str1.1,"aMS",@progbits,1
+    \\val5:
+    \\.ascii "World \0"
+    \\.section .rodata.str1.1,"aMS",@progbits,1
+    \\val7:
+    \\.ascii "Hello \0"
+    });
+
+    const obj2 = addObject(b, opts, .{ .name = "b" });
+    obj2.addObject(obj1);
+
+    const check = obj2.checkObject();
+    check.dumpSection(".rodata.str1.1");
+    check.checkExact("Hello \x00World \x00");
+    test_step.dependOn(&check.step);
+
+    return test_step;
+}
+
 fn testRelocatableNoEhFrame(b: *Build, opts: Options) *Step {
     const test_step = addTestStep(b, "relocatable-no-eh-frame", opts);
 
test/link/link.zig
@@ -1,21 +1,3 @@
-pub fn build(b: *Build) void {
-    const test_step = b.step("test-link", "Run link tests");
-    b.default_step = test_step;
-
-    const has_macos_sdk = b.option(bool, "has_macos_sdk", "whether the host provides a macOS SDK in system path");
-    const has_ios_sdk = b.option(bool, "has_ios_sdk", "whether the host provides a iOS SDK in system path");
-    const has_symlinks_windows = b.option(bool, "has_symlinks_windows", "whether the host is windows and has symlinks enabled");
-
-    const build_opts: BuildOptions = .{
-        .has_macos_sdk = has_macos_sdk orelse false,
-        .has_ios_sdk = has_ios_sdk orelse false,
-        .has_symlinks_windows = has_symlinks_windows orelse false,
-    };
-
-    test_step.dependOn(@import("elf.zig").testAll(b, build_opts));
-    test_step.dependOn(@import("macho.zig").testAll(b, build_opts));
-}
-
 pub const BuildOptions = struct {
     has_macos_sdk: bool,
     has_ios_sdk: bool,