Commit 1b8ed7842c
Changed files (6)
src/arch/aarch64/Emit.zig
@@ -208,8 +208,8 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
}
switch (tag) {
+ .load_memory_direct => return 3 * 4,
.load_memory_got,
- .load_memory_direct,
.load_memory_ptr_got,
.load_memory_ptr_direct,
=> return 2 * 4,
@@ -654,15 +654,31 @@ fn mirLoadMemoryPie(emit: *Emit, inst: Mir.Inst.Index) !void {
const data = emit.mir.extraData(Mir.LoadMemoryPie, payload).data;
const reg = @intToEnum(Register, data.register);
- // PC-relative displacement to the entry in the GOT table.
+ // PC-relative displacement to the entry in memory.
// adrp
const offset = @intCast(u32, emit.code.items.len);
try emit.writeInstruction(Instruction.adrp(reg, 0));
switch (tag) {
- .load_memory_got,
- .load_memory_direct,
- => {
+ .load_memory_got => {
+ // ldr reg, reg, offset
+ try emit.writeInstruction(Instruction.ldr(
+ reg,
+ reg,
+ Instruction.LoadStoreOffset.imm(0),
+ ));
+ },
+ .load_memory_direct => {
+ // We cannot load the offset directly as it may not be aligned properly.
+ // For example, load for 64bit register will require the target address offset
+ // to be 8-byte aligned, while the value might have non-8-byte natural alignment,
+ // meaning the linker might have put it at a non-8-byte aligned address. To circumvent
+ // this, we use `adrp, add` to form the address value which we then dereference with
+ // `ldr`.
+ // Note that this can potentially be optimised out by the codegen/linker if the
+ // target address is appropriately aligned.
+ // add reg, reg, offset
+ try emit.writeInstruction(Instruction.add(reg, reg, 0, false));
// ldr reg, reg, offset
try emit.writeInstruction(Instruction.ldr(
reg,
src/link/MachO/Atom.zig
@@ -691,11 +691,11 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void {
if (is_via_got) {
const got_index = macho_file.got_entries_table.get(rel.target) orelse {
- const n_strx = switch (rel.target) {
- .local => |sym_index| macho_file.locals.items[sym_index].n_strx,
- .global => |n_strx| n_strx,
- };
- log.err("expected GOT entry for symbol '{s}'", .{macho_file.getString(n_strx)});
+ log.err("expected GOT entry for symbol", .{});
+ switch (rel.target) {
+ .local => |sym_index| log.err(" local @{d}", .{sym_index}),
+ .global => |n_strx| log.err(" global @'{s}'", .{macho_file.getString(n_strx)}),
+ }
log.err(" this is an internal linker error", .{});
return error.FailedToResolveRelocationTarget;
};
src/link/MachO.zig
@@ -3797,10 +3797,11 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl: *Module.De
atom.code.clearRetainingCapacity();
try atom.code.appendSlice(self.base.allocator, code);
- const match = try self.getMatchingSectionAtom(atom, typed_value.ty, typed_value.val);
+ const match = try self.getMatchingSectionAtom(atom, decl_name, typed_value.ty, typed_value.val);
const addr = try self.allocateAtom(atom, code.len, required_alignment, match);
log.debug("allocated atom for {s} at 0x{x}", .{ name, addr });
+ log.debug(" (required alignment 0x{x})", .{required_alignment});
errdefer self.freeAtom(atom, match, true);
@@ -3903,28 +3904,60 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
try self.updateDeclExports(module, decl, decl_exports);
}
-fn isElemTyPointer(ty: Type) bool {
+/// Checks whether the value, or any of its embedded values, stores a pointer and thus
+/// requires a rebase opcode for the dynamic linker.
+fn needsPointerRebase(ty: Type, val: Value) bool {
+ if (ty.zigTypeTag() == .Fn) {
+ return false;
+ }
+ if (val.pointerDecl()) |_| {
+ return true;
+ }
+
switch (ty.zigTypeTag()) {
- .Fn => return false,
+ .Fn => unreachable,
.Pointer => return true,
- .Array => {
- const elem_ty = ty.elemType();
- return isElemTyPointer(elem_ty);
+ .Array, .Vector => {
+ if (ty.arrayLen() == 0) return false;
+ const elem_ty = ty.childType();
+ var elem_value_buf: Value.ElemValueBuffer = undefined;
+ const elem_val = val.elemValueBuffer(0, &elem_value_buf);
+ return needsPointerRebase(elem_ty, elem_val);
},
- .Struct, .Union => {
- const len = ty.structFieldCount();
- var i: usize = 0;
- while (i < len) : (i += 1) {
- const field_ty = ty.structFieldType(i);
- if (isElemTyPointer(field_ty)) return true;
- }
- return false;
+ .Struct => {
+ const fields = ty.structFields().values();
+ if (fields.len == 0) return false;
+ if (val.castTag(.@"struct")) |payload| {
+ const field_values = payload.data;
+ for (field_values) |field_val, i| {
+ if (needsPointerRebase(fields[i].ty, field_val)) return true;
+ } else return false;
+ } else return false;
+ },
+ .Optional => {
+ if (val.castTag(.opt_payload)) |payload| {
+ const sub_val = payload.data;
+ var buffer: Type.Payload.ElemType = undefined;
+ const sub_ty = ty.optionalChild(&buffer);
+ return needsPointerRebase(sub_ty, sub_val);
+ } else return false;
+ },
+ .Union => {
+ const union_obj = val.cast(Value.Payload.Union).?.data;
+ const active_field_ty = ty.unionFieldType(union_obj.tag);
+ return needsPointerRebase(active_field_ty, union_obj.val);
+ },
+ .ErrorUnion => {
+ if (val.castTag(.eu_payload)) |payload| {
+ const payload_ty = ty.errorUnionPayload();
+ return needsPointerRebase(payload_ty, payload.data);
+ } else return false;
},
else => return false,
}
}
-fn getMatchingSectionAtom(self: *MachO, atom: *Atom, ty: Type, val: Value) !MatchingSection {
+fn getMatchingSectionAtom(self: *MachO, atom: *Atom, name: []const u8, ty: Type, val: Value) !MatchingSection {
const code = atom.code.items;
const alignment = ty.abiAlignment(self.base.options.target);
const align_log_2 = math.log2(alignment);
@@ -3938,10 +3971,25 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, ty: Type, val: Value) !Matc
.seg = self.data_segment_cmd_index.?,
.sect = self.bss_section_index.?,
};
+ } else {
+ break :blk MatchingSection{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.data_section_index.?,
+ };
}
+ }
+
+ if (val.castTag(.variable)) |_| {
+ break :blk MatchingSection{
+ .seg = self.data_segment_cmd_index.?,
+ .sect = self.data_section_index.?,
+ };
+ }
+
+ if (needsPointerRebase(ty, val)) {
break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__DATA"),
- .sectname = makeStaticString("__data"),
+ .segname = makeStaticString("__DATA_CONST"),
+ .sectname = makeStaticString("__const"),
.size = code.len,
.@"align" = align_log_2,
})).?;
@@ -3954,8 +4002,8 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, ty: Type, val: Value) !Matc
.sect = self.text_section_index.?,
};
},
- .Array => switch (val.tag()) {
- .bytes => {
+ .Array => {
+ if (val.tag() == .bytes) {
switch (ty.tag()) {
.array_u8_sentinel_0,
.const_slice_u8_sentinel_0,
@@ -3969,79 +4017,23 @@ fn getMatchingSectionAtom(self: *MachO, atom: *Atom, ty: Type, val: Value) !Matc
.@"align" = align_log_2,
})).?;
},
- else => {
- break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__TEXT"),
- .sectname = makeStaticString("__const"),
- .size = code.len,
- .@"align" = align_log_2,
- })).?;
- },
- }
- },
- .array => {
- if (isElemTyPointer(ty)) {
- break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__DATA_CONST"),
- .sectname = makeStaticString("__const"),
- .size = code.len,
- .@"align" = align_log_2,
- })).?;
- } else {
- break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__TEXT"),
- .sectname = makeStaticString("__const"),
- .size = code.len,
- .@"align" = align_log_2,
- })).?;
+ else => {},
}
- },
- else => {
- break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__TEXT"),
- .sectname = makeStaticString("__const"),
- .size = code.len,
- .@"align" = align_log_2,
- })).?;
- },
- },
- .Pointer => {
- if (val.castTag(.variable)) |_| {
- break :blk MatchingSection{
- .seg = self.data_segment_cmd_index.?,
- .sect = self.data_section_index.?,
- };
- } else {
- break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__DATA_CONST"),
- .sectname = makeStaticString("__const"),
- .size = code.len,
- .@"align" = align_log_2,
- })).?;
- }
- },
- else => {
- if (val.castTag(.variable)) |_| {
- break :blk MatchingSection{
- .seg = self.data_segment_cmd_index.?,
- .sect = self.data_section_index.?,
- };
- } else {
- break :blk (try self.getMatchingSection(.{
- .segname = makeStaticString("__TEXT"),
- .sectname = makeStaticString("__const"),
- .size = code.len,
- .@"align" = align_log_2,
- })).?;
}
},
+ else => {},
}
+ break :blk (try self.getMatchingSection(.{
+ .segname = makeStaticString("__TEXT"),
+ .sectname = makeStaticString("__const"),
+ .size = code.len,
+ .@"align" = align_log_2,
+ })).?;
};
- const local = self.locals.items[atom.local_sym_index];
const seg = self.load_commands.items[match.seg].segment;
const sect = seg.sections.items[match.sect];
log.debug(" allocating atom '{s}' in '{s},{s}' ({d},{d})", .{
- self.getString(local.n_strx),
+ name,
sect.segName(),
sect.sectName(),
match.seg,
@@ -4055,13 +4047,14 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes()
const symbol = &self.locals.items[decl.link.macho.local_sym_index];
+ const sym_name = try decl.getFullyQualifiedName(self.base.allocator);
+ defer self.base.allocator.free(sym_name);
+
const decl_ptr = self.decls.getPtr(decl).?;
if (decl_ptr.* == null) {
- decl_ptr.* = try self.getMatchingSectionAtom(&decl.link.macho, decl.ty, decl.val);
+ decl_ptr.* = try self.getMatchingSectionAtom(&decl.link.macho, sym_name, decl.ty, decl.val);
}
const match = decl_ptr.*.?;
- const sym_name = try decl.getFullyQualifiedName(self.base.allocator);
- defer self.base.allocator.free(sym_name);
if (decl.link.macho.size != 0) {
const capacity = decl.link.macho.capacity(self.*);
@@ -4071,6 +4064,7 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
const vaddr = try self.growAtom(&decl.link.macho, code_len, required_alignment, match);
log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ sym_name, symbol.n_value, vaddr });
+ log.debug(" (required alignment 0x{x})", .{required_alignment});
if (vaddr != symbol.n_value) {
log.debug(" (writing new GOT entry)", .{});
@@ -4105,6 +4099,7 @@ fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64
const addr = try self.allocateAtom(&decl.link.macho, code_len, required_alignment, match);
log.debug("allocated atom for {s} at 0x{x}", .{ sym_name, addr });
+ log.debug(" (required alignment 0x{x})", .{required_alignment});
errdefer self.freeAtom(&decl.link.macho, match, false);
test/behavior/align.zig
@@ -7,7 +7,6 @@ var foo: u8 align(4) = 100;
test "global variable alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) return error.SkipZigTest;
comptime try expect(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
comptime try expect(@TypeOf(&foo) == *align(4) u8);
test/behavior/basic.zig
@@ -195,9 +195,6 @@ test "multiline string comments at multiple places" {
}
test "string concatenation" {
- if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) return error.SkipZigTest;
-
try expect(mem.eql(u8, "OK" ++ " IT " ++ "WORKED", "OK IT WORKED"));
}
test/behavior/union.zig
@@ -44,7 +44,6 @@ fn setInt(foo: *Foo, x: i32) void {
test "comptime union field access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime {
var foo = Foo{ .int = 0 };
@@ -77,14 +76,12 @@ const ExternPtrOrInt = extern union {
};
test "extern union size" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime try expect(@sizeOf(ExternPtrOrInt) == 8);
}
test "0-sized extern union definition" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const U = extern union {
a: void,
@@ -115,9 +112,7 @@ const err = @as(anyerror!Agg, Agg{
const array = [_]Value{ v1, v2, v1, v2 };
test "unions embedded in aggregate types" {
- if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
switch (array[1]) {
Value.Array => |arr| try expect(arr[4] == 3),
@@ -131,7 +126,6 @@ test "unions embedded in aggregate types" {
test "access a member of tagged union with conflicting enum tag name" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const Bar = union(enum) {
A: A,
@@ -176,7 +170,6 @@ const TaggedUnionWithPayload = union(enum) {
test "union alignment" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime {
try expect(@alignOf(AlignTestTaggedUnion) >= @alignOf([9]u8));
@@ -276,7 +269,6 @@ fn testCastUnionToTag() !void {
test "union field access gives the enum values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(TheUnion.A == TheTag.A);
try expect(TheUnion.B == TheTag.B);
@@ -352,7 +344,6 @@ const PackedPtrOrInt = packed union {
};
test "packed union size" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime try expect(@sizeOf(PackedPtrOrInt) == 8);
}
@@ -362,7 +353,6 @@ const ZeroBits = union {
};
test "union with only 1 field which is void should be zero bits" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
comptime try expect(@sizeOf(ZeroBits) == 0);
}
@@ -422,7 +412,6 @@ test "union with only 1 field casted to its enum type" {
test "union with one member defaults to u0 tag type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const U0 = union(enum) {
X: u32,