Commit 69abc945e4
Changed files (124)
src/arch/x86_64/CodeGen.zig
@@ -168141,7 +168141,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.unused,
.unused,
},
- .dst_temps = .{ .{ .cc = .b }, .unused },
+ .dst_temps = .{ .{ .cc = .be }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp1p, .lea(.tmp0), ._, ._ },
@@ -168165,7 +168165,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.unused,
.unused,
},
- .dst_temps = .{ .{ .cc = .b }, .unused },
+ .dst_temps = .{ .{ .cc = .be }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp1p, .lea(.tmp0), ._, ._ },
@@ -168189,7 +168189,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.unused,
.unused,
},
- .dst_temps = .{ .{ .cc = .b }, .unused },
+ .dst_temps = .{ .{ .cc = .be }, .unused },
.clobbers = .{ .eflags = true },
.each = .{ .once = &.{
.{ ._, ._, .lea, .tmp1p, .lea(.tmp0), ._, ._ },
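Note (illustrative, not part of the diff): on x86, the `b` condition tests CF=1 (unsigned less-than) while `be` tests CF=1 or ZF=1 (unsigned less-than-or-equal), so the destination temp now also captures the boundary-equal outcome of whatever comparison the surrounding sequence performs. A rough Zig analogy of the two predicates, assuming unsigned operands `a` and `b`:

    const cc_b: bool = a < b; // .b  — carry set
    const cc_be: bool = a <= b; // .be — carry or zero set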
src/arch/x86_64/Emit.zig
@@ -168,11 +168,12 @@ pub fn emitMir(emit: *Emit) Error!void {
else if (emit.bin_file.cast(.macho)) |macho_file|
macho_file.getZigObject().?.getOrCreateMetadataForLazySymbol(macho_file, emit.pt, lazy_sym) catch |err|
return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
- else if (emit.bin_file.cast(.coff)) |coff_file| sym_index: {
- const atom = coff_file.getOrCreateAtomForLazySymbol(emit.pt, lazy_sym) catch |err|
- return emit.fail("{s} creating lazy symbol", .{@errorName(err)});
- break :sym_index coff_file.getAtom(atom).getSymbolIndex().?;
- } else if (emit.bin_file.cast(.plan9)) |p9_file|
+ else if (emit.bin_file.cast(.coff)) |coff_file|
+ if (coff_file.getOrCreateAtomForLazySymbol(emit.pt, lazy_sym)) |atom|
+ coff_file.getAtom(atom).getSymbolIndex().?
+ else |err|
+ return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
+ else if (emit.bin_file.cast(.plan9)) |p9_file|
p9_file.getOrCreateAtomForLazySymbol(emit.pt, lazy_sym) catch |err|
return emit.fail("{s} creating lazy symbol", .{@errorName(err)})
else
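Note: the COFF branch now selects on the error union directly with Zig's `if (...) |payload| ... else |err| ...` form instead of a labeled block plus `catch`. A minimal standalone sketch of the construct, with placeholder names:

    fn symbolIndex(atom_or_err: anyerror!u32) anyerror!u32 {
        // payload capture on success, error capture on failure
        return if (atom_or_err) |atom| atom else |err| err;
    }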
src/codegen/aarch64/Assemble.zig
@@ -6,14 +6,19 @@ pub const Operand = union(enum) {
};
pub fn nextInstruction(as: *Assemble) !?Instruction {
- @setEvalBranchQuota(37_000);
+ @setEvalBranchQuota(42_000);
comptime var ct_token_buf: [token_buf_len]u8 = undefined;
var token_buf: [token_buf_len]u8 = undefined;
const original_source = while (true) {
const original_source = as.source;
const source_token = try as.nextToken(&token_buf, .{});
- if (source_token.len == 0) return null;
- if (source_token[0] != '\n') break original_source;
+ switch (source_token.len) {
+ 0 => return null,
+ else => switch (source_token[0]) {
+ else => break original_source,
+ '\n', ';' => {},
+ },
+ }
};
log.debug(
\\.
@@ -52,7 +57,13 @@ pub fn nextInstruction(as: *Assemble) !?Instruction {
std.zig.fmtString(source_token),
});
if (pattern_token.len == 0) {
- if (source_token.len > 0 and source_token[0] != '\n') break :next_pattern;
+ switch (source_token.len) {
+ 0 => {},
+ else => switch (source_token[0]) {
+ else => break :next_pattern,
+ '\n', ';' => {},
+ },
+ }
const encode = @field(Instruction, @tagName(instruction.encode[0]));
const Encode = @TypeOf(encode);
var args: std.meta.ArgsTuple(Encode) = undefined;
@@ -65,7 +76,7 @@ pub fn nextInstruction(as: *Assemble) !?Instruction {
const symbol = &@field(symbols, symbol_name);
symbol.* = zonCast(SymbolSpec, @field(instruction.symbols, symbol_name), .{}).parse(source_token) orelse break :next_pattern;
log.debug("{s} = {any}", .{ symbol_name, symbol.* });
- } else if (!std.ascii.eqlIgnoreCase(pattern_token, source_token)) break :next_pattern;
+ } else if (!toUpperEqlAssertUpper(source_token, pattern_token)) break :next_pattern;
}
}
log.debug("'{s}' not matched...", .{instruction.pattern});
@@ -125,6 +136,15 @@ fn zonCast(comptime Result: type, zon_value: anytype, symbols: anytype) Result {
}
}
+fn toUpperEqlAssertUpper(lhs: []const u8, rhs: []const u8) bool {
+ if (lhs.len != rhs.len) return false;
+ for (lhs, rhs) |l, r| {
+ assert(!std.ascii.isLower(r));
+ if (std.ascii.toUpper(l) != r) return false;
+ }
+ return true;
+}
+
const token_buf_len = "v31.b[15]".len;
fn nextToken(as: *Assemble, buf: *[token_buf_len]u8, comptime opts: struct {
operands: bool = false,
@@ -134,7 +154,7 @@ fn nextToken(as: *Assemble, buf: *[token_buf_len]u8, comptime opts: struct {
while (true) c: switch (as.source[0]) {
0 => return as.source[0..0],
'\t', '\n' + 1...'\r', ' ' => as.source = as.source[1..],
- '\n', '!', '#', ',', '[', ']' => {
+ '\n', '!', '#', ',', ';', '[', ']' => {
defer as.source = as.source[1..];
return as.source[0..1];
},
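Note: `;` now terminates a statement the same way `\n` does, and the case-insensitive comparison is replaced by `toUpperEqlAssertUpper`, which folds only the source token and asserts that the pattern side is already uppercase. An illustrative check of that contract (assuming `std` in scope, as elsewhere in this file):

    test toUpperEqlAssertUpper {
        try std.testing.expect(toUpperEqlAssertUpper("lsl", "LSL"));
        try std.testing.expect(toUpperEqlAssertUpper("LSL", "LSL"));
        try std.testing.expect(!toUpperEqlAssertUpper("lsl", "LSR"));
    }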
src/codegen/aarch64/instructions.zon
@@ -213,6 +213,63 @@
},
.encode = .{ .ands, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
},
+ // C6.2.16 ASR (register)
+ .{
+ .pattern = "ASR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .asrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "ASR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .asrv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.17 ASR (immediate)
+ .{
+ .pattern = "ASR <Wd>, <Wn>, #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sbfm, .Wd, .Wn, .{ .N = .word, .immr = .shift, .imms = 31 } },
+ },
+ .{
+ .pattern = "ASR <Xd>, <Xn>, #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sbfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .shift, .imms = 63 } },
+ },
+ // C6.2.18 ASRV
+ .{
+ .pattern = "ASRV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .asrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "ASRV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .asrv, .Xd, .Xn, .Xm },
+ },
// C6.2.35 BLR
.{
.pattern = "BLR <Xn>",
@@ -681,6 +738,82 @@
},
.encode = .{ .ldr, .Xt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
},
+ // C6.2.212 LSL (register)
+ .{
+ .pattern = "LSL <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lslv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSL <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lslv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.214 LSLV
+ .{
+ .pattern = "LSLV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lslv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSLV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lslv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.215 LSR (register)
+ .{
+ .pattern = "LSR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lsrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lsrv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.217 LSRV
+ .{
+ .pattern = "LSRV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .lsrv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "LSRV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .lsrv, .Xd, .Xn, .Xm },
+ },
// C6.2.220 MOV (to/from SP)
.{
.pattern = "MOV WSP, <Wn|WSP>",
@@ -964,6 +1097,63 @@
},
.encode = .{ .ret, .Xn },
},
+ // C6.2.261 ROR (immediate)
+ .{
+ .pattern = "ROR <Wd>, <Ws>, #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Ws = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .extr, .Wd, .Ws, .Ws, .shift },
+ },
+ .{
+ .pattern = "ROR <Xd>, <Xs>, #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xs = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .extr, .Xd, .Xs, .Xs, .shift },
+ },
+ // C6.2.262 ROR (register)
+ .{
+ .pattern = "ROR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .rorv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "ROR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .rorv, .Xd, .Xn, .Xm },
+ },
+ // C6.2.263 RORV
+ .{
+ .pattern = "RORV <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .rorv, .Wd, .Wn, .Wm },
+ },
+ .{
+ .pattern = "RORV <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .rorv, .Xd, .Xn, .Xm },
+ },
// C6.2.268 SBFM
.{
.pattern = "SBFM <Wd>, <Wn>, #<immr>, #<imms>",
src/codegen/aarch64/Mir.zig
@@ -4,6 +4,7 @@ epilogue: []const Instruction,
literals: []const u32,
nav_relocs: []const Reloc.Nav,
uav_relocs: []const Reloc.Uav,
+lazy_relocs: []const Reloc.Lazy,
global_relocs: []const Reloc.Global,
literal_relocs: []const Reloc.Literal,
@@ -21,8 +22,13 @@ pub const Reloc = struct {
reloc: Reloc,
};
+ pub const Lazy = struct {
+ symbol: link.File.LazySymbol,
+ reloc: Reloc,
+ };
+
pub const Global = struct {
- global: [*:0]const u8,
+ name: [*:0]const u8,
reloc: Reloc,
};
@@ -38,6 +44,7 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
gpa.free(mir.literals);
gpa.free(mir.nav_relocs);
gpa.free(mir.uav_relocs);
+ gpa.free(mir.lazy_relocs);
gpa.free(mir.global_relocs);
gpa.free(mir.literal_relocs);
mir.* = undefined;
@@ -119,16 +126,37 @@ pub fn emit(
body_end - Instruction.size * (1 + uav_reloc.reloc.label),
uav_reloc.reloc.addend,
);
+ for (mir.lazy_relocs) |lazy_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ if (lf.cast(.elf)) |ef|
+ ef.zigObjectPtr().?.getOrCreateMetadataForLazySymbol(ef, pt, lazy_reloc.symbol) catch |err|
+ return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
+ else if (lf.cast(.macho)) |mf|
+ mf.getZigObject().?.getOrCreateMetadataForLazySymbol(mf, pt, lazy_reloc.symbol) catch |err|
+ return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
+ else if (lf.cast(.coff)) |cf|
+ if (cf.getOrCreateAtomForLazySymbol(pt, lazy_reloc.symbol)) |atom|
+ cf.getAtom(atom).getSymbolIndex().?
+ else |err|
+ return zcu.codegenFail(func.owner_nav, "{s} creating lazy symbol", .{@errorName(err)})
+ else
+ return zcu.codegenFail(func.owner_nav, "external symbols unimplemented for {s}", .{@tagName(lf.tag)}),
+ mir.body[lazy_reloc.reloc.label],
+ body_end - Instruction.size * (1 + lazy_reloc.reloc.label),
+ lazy_reloc.reloc.addend,
+ );
for (mir.global_relocs) |global_reloc| try emitReloc(
lf,
zcu,
func.owner_nav,
if (lf.cast(.elf)) |ef|
- try ef.getGlobalSymbol(std.mem.span(global_reloc.global), null)
+ try ef.getGlobalSymbol(std.mem.span(global_reloc.name), null)
else if (lf.cast(.macho)) |mf|
- try mf.getGlobalSymbol(std.mem.span(global_reloc.global), null)
+ try mf.getGlobalSymbol(std.mem.span(global_reloc.name), null)
else if (lf.cast(.coff)) |cf|
- try cf.getGlobalSymbol(std.mem.span(global_reloc.global), "compiler_rt")
+ try cf.getGlobalSymbol(std.mem.span(global_reloc.name), "compiler_rt")
else
return zcu.codegenFail(func.owner_nav, "external symbols unimplemented for {s}", .{@tagName(lf.tag)}),
mir.body[global_reloc.reloc.label],
@@ -172,35 +200,6 @@ fn emitReloc(
const gpa = zcu.gpa;
switch (instruction.decode()) {
else => unreachable,
- .branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| {
- const zo = ef.zigObjectPtr().?;
- const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
- const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
- .b => .JUMP26,
- .bl => .CALL26,
- };
- try atom.addReloc(gpa, .{
- .r_offset = offset,
- .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
- .r_addend = @bitCast(addend),
- }, zo);
- } else if (lf.cast(.macho)) |mf| {
- const zo = mf.getZigObject().?;
- const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
- try atom.addReloc(mf, .{
- .tag = .@"extern",
- .offset = offset,
- .target = sym_index,
- .addend = @bitCast(addend),
- .type = .branch,
- .meta = .{
- .pcrel = true,
- .has_subtractor = false,
- .length = 2,
- .symbolnum = @intCast(sym_index),
- },
- });
- },
.data_processing_immediate => |decoded| if (lf.cast(.elf)) |ef| {
const zo = ef.zigObjectPtr().?;
const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
@@ -259,6 +258,80 @@ fn emitReloc(
},
}
},
+ .branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| {
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
+ .b => .JUMP26,
+ .bl => .CALL26,
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .branch,
+ .meta = .{
+ .pcrel = true,
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ });
+ },
+ .load_store => |decoded| if (lf.cast(.elf)) |ef| {
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode().register_unsigned_immediate.decode()) {
+ .integer => |integer| switch (integer.decode()) {
+ .unallocated, .prfm => unreachable,
+ .strb, .ldrb, .ldrsb => .LDST8_ABS_LO12_NC,
+ .strh, .ldrh, .ldrsh => .LDST16_ABS_LO12_NC,
+ .ldrsw => .LDST32_ABS_LO12_NC,
+ inline .str, .ldr => |encoded| switch (encoded.sf) {
+ .word => .LDST32_ABS_LO12_NC,
+ .doubleword => .LDST64_ABS_LO12_NC,
+ },
+ },
+ .vector => |vector| switch (vector.group.opc1.decode(vector.group.size)) {
+ .byte => .LDST8_ABS_LO12_NC,
+ .half => .LDST16_ABS_LO12_NC,
+ .single => .LDST32_ABS_LO12_NC,
+ .double => .LDST64_ABS_LO12_NC,
+ .quad => .LDST128_ABS_LO12_NC,
+ .scalable, .predicate => unreachable,
+ },
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .pageoff,
+ .meta = .{
+ .pcrel = false,
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ });
+ },
}
}
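Note: the new `.load_store` case extends relocation support to the page-plus-low-12-bit addressing sequences that Select.zig now emits against lazy symbols. A sketch of the two shapes being patched, with placeholder registers:

    adrp x0, sym        // page address of the symbol
    add  x0, x0, #0     // low 12 bits (data_processing_immediate case), as in error_name
    adrp x1, sym        // or, as in cmp_lt_errors_len:
    ldr  w1, [x1]       // load with the low 12 bits applied (new load_store case: LDST*_ABS_LO12_NC on ELF, .pageoff on Mach-O)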
src/codegen/aarch64/Select.zig
@@ -22,6 +22,7 @@ instructions: std.ArrayListUnmanaged(codegen.aarch64.encoding.Instruction),
literals: std.ArrayListUnmanaged(u32),
nav_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Nav),
uav_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Uav),
+lazy_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Lazy),
global_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Global),
literal_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Literal),
@@ -50,11 +51,11 @@ pub const Block = struct {
std.math.maxInt(@typeInfo(Air.Inst.Index).@"enum".tag_type),
);
- fn branch(block: *const Block, isel: *Select) !void {
- if (isel.instructions.items.len > block.target_label) {
- try isel.emit(.b(@intCast((isel.instructions.items.len + 1 - block.target_label) << 2)));
+ fn branch(target_block: *const Block, isel: *Select) !void {
+ if (isel.instructions.items.len > target_block.target_label) {
+ try isel.emit(.b(@intCast((isel.instructions.items.len + 1 - target_block.target_label) << 2)));
}
- try isel.merge(&block.live_registers, .{});
+ try isel.merge(&target_block.live_registers, .{});
}
};
@@ -84,12 +85,12 @@ pub const Loop = struct {
pub const empty_list: u32 = std.math.maxInt(u32);
- fn branch(loop: *Loop, isel: *Select) !void {
+ fn branch(target_loop: *Loop, isel: *Select) !void {
try isel.instructions.ensureUnusedCapacity(isel.pt.zcu.gpa, 1);
- const repeat_list_tail = loop.repeat_list;
- loop.repeat_list = @intCast(isel.instructions.items.len);
+ const repeat_list_tail = target_loop.repeat_list;
+ target_loop.repeat_list = @intCast(isel.instructions.items.len);
isel.instructions.appendAssumeCapacity(@bitCast(repeat_list_tail));
- try isel.merge(&loop.live_registers, .{});
+ try isel.merge(&target_loop.live_registers, .{});
}
};
@@ -108,6 +109,7 @@ pub fn deinit(isel: *Select) void {
isel.literals.deinit(gpa);
isel.nav_relocs.deinit(gpa);
isel.uav_relocs.deinit(gpa);
+ isel.lazy_relocs.deinit(gpa);
isel.global_relocs.deinit(gpa);
isel.literal_relocs.deinit(gpa);
@@ -864,7 +866,7 @@ pub fn finishAnalysis(isel: *Select) !void {
}
}
-pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
+pub fn body(isel: *Select, air_body: []const Air.Inst.Index) error{ OutOfMemory, CodegenFail }!void {
const zcu = isel.pt.zcu;
const ip = &zcu.intern_pool;
const gpa = zcu.gpa;
@@ -946,7 +948,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
- .add, .add_optimized, .add_wrap, .sub, .sub_optimized, .sub_wrap => |air_tag| {
+ .add, .add_safe, .add_optimized, .add_wrap, .sub, .sub_safe, .sub_optimized, .sub_wrap => |air_tag| {
if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
defer res_vi.value.deref(isel);
@@ -954,13 +956,16 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
const ty = isel.air.typeOf(bin_op.lhs, ip);
if (!ty.isRuntimeFloat()) try res_vi.value.addOrSubtract(isel, ty, try isel.use(bin_op.lhs), switch (air_tag) {
else => unreachable,
- .add, .add_wrap => .add,
- .sub, .sub_wrap => .sub,
- }, try isel.use(bin_op.rhs), .{ .wrap = switch (air_tag) {
- else => unreachable,
- .add, .sub => false,
- .add_wrap, .sub_wrap => true,
- } }) else switch (ty.floatBits(isel.target)) {
+ .add, .add_safe, .add_wrap => .add,
+ .sub, .sub_safe, .sub_wrap => .sub,
+ }, try isel.use(bin_op.rhs), .{
+ .overflow = switch (air_tag) {
+ else => unreachable,
+ .add, .sub => .@"unreachable",
+ .add_safe, .sub_safe => .{ .panic = .integer_overflow },
+ .add_wrap, .sub_wrap => .wrap,
+ },
+ }) else switch (ty.floatBits(isel.target)) {
else => unreachable,
16, 32, 64 => |bits| {
const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
@@ -1021,7 +1026,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (air_tag) {
+ .name = switch (air_tag) {
else => unreachable,
.add, .add_optimized => switch (bits) {
else => unreachable,
@@ -1336,7 +1341,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__mulhf3",
32 => "__mulsf3",
@@ -1379,6 +1384,143 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+ .mul_safe => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.signedness) {
+ .signed => switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.orr(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.@"b."(
+ .invert(.ne),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.ands(.wzr, lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ },
+ .unsigned => switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.@"and"(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 2...16 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.@"b."(
+ .eq,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.ands(.wzr, res_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = @intCast(32 - bits - 1),
+ } }));
+ try isel.emit(.madd(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), .wzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 17...32 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.@"b."(
+ .eq,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.ands(.xzr, res_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = @intCast(64 - bits - 1),
+ } }));
+ try isel.emit(.umaddl(res_ra.x(), lhs_mat.ra.w(), rhs_mat.ra.w(), .xzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 33...63 => |bits| {
+ const lo64_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const hi64_ra = hi64_ra: {
+ const lo64_lock = isel.tryLockReg(lo64_ra);
+ defer lo64_lock.unlock(isel);
+ break :hi64_ra try isel.allocIntReg();
+ };
+ defer isel.freeReg(hi64_ra);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.cbz(
+ hi64_ra.x(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.orr(hi64_ra.x(), hi64_ra.x(), .{ .shifted_register = .{
+ .register = lo64_ra.x(),
+ .shift = .{ .lsr = @intCast(bits) },
+ } }));
+ try isel.emit(.madd(lo64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try isel.emit(.umulh(hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.madd(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ const hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi64_ra);
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(.integer_overflow);
+ try isel.emit(.cbz(
+ hi64_ra.x(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try isel.emit(.umulh(hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
.mul_sat => |air_tag| {
if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
defer res_vi.value.deref(isel);
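Note: the overflow checks above share a skip-over-panic shape: a conditional branch whose displacement is derived from `skip_label` jumps past a `bl` to the panic handler when the check passes (the `(items.len + 1 - skip_label) << 2` arithmetic, together with how `emitReloc` indexes `mir.body` from `body_end`, suggests the body list is built back to front). A sketch of the runtime shape for the 64-bit unsigned case, with placeholder registers and label:

    umulh x9, x1, x2        // high 64 bits of the product
    cbz   x9, 1f            // zero high half: no overflow, skip the panic call
    bl    <panic handler>   // Zcu.SimplePanicId.integer_overflow
    1:
    madd  x0, x1, x2, xzr   // low 64 bits are the result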
@@ -1674,7 +1816,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__divhf3",
32 => "__divsf3",
@@ -1813,7 +1955,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (int_info.signedness) {
+ .name = switch (int_info.signedness) {
.signed => "__divti3",
.unsigned => "__udivti3",
},
@@ -1917,7 +2059,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
else => unreachable,
.div_trunc, .div_trunc_optimized => {
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__trunch",
32 => "truncf",
@@ -1931,7 +2073,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
},
.div_floor, .div_floor_optimized => {
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__floorh",
32 => "floorf",
@@ -1946,7 +2088,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
.div_exact, .div_exact_optimized => {},
}
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__divhf3",
32 => "__divsf3",
@@ -2046,7 +2188,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__fmodh",
32 => "fmodf",
@@ -2212,7 +2354,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (air_tag) {
+ .name = switch (air_tag) {
else => unreachable,
.max => switch (bits) {
else => unreachable,
@@ -2284,7 +2426,9 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
else => unreachable,
.add_with_overflow => .add,
.sub_with_overflow => .sub,
- }, rhs_vi, .{ .wrap = true, .overflow_ra = try overflow_vi.?.defReg(isel) orelse .zr });
+ }, rhs_vi, .{
+ .overflow = if (try overflow_vi.?.defReg(isel)) |overflow_ra| .{ .ra = overflow_ra } else .wrap,
+ });
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
@@ -3092,7 +3236,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = "memcpy",
+ .name = "memcpy",
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -3119,7 +3263,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = "memcpy",
+ .name = "memcpy",
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -3139,19 +3283,9 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
.block => {
const ty_pl = air.data(air.inst_index).ty_pl;
const extra = isel.air.extraData(Air.Block, ty_pl.payload);
-
- if (ty_pl.ty != .noreturn_type) {
- isel.blocks.putAssumeCapacityNoClobber(air.inst_index, .{
- .live_registers = isel.live_registers,
- .target_label = @intCast(isel.instructions.items.len),
- });
- }
- try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
- if (ty_pl.ty != .noreturn_type) {
- const block_entry = isel.blocks.pop().?;
- assert(block_entry.key == air.inst_index);
- if (isel.live_values.fetchRemove(air.inst_index)) |result_vi| result_vi.value.deref(isel);
- }
+ try isel.block(air.inst_index, ty_pl.ty.toType(), @ptrCast(
+ isel.air.extra.items[extra.end..][0..extra.data.body_len],
+ ));
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
.loop => {
@@ -3175,11 +3309,11 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
// IT'S DOM TIME!!!
- for (isel.blocks.values(), 0..) |*block, dom_index| {
+ for (isel.blocks.values(), 0..) |*dom_block, dom_index| {
if (@as(u1, @truncate(isel.dom.items[
loop.dom + dom_index / @bitSizeOf(DomInt)
] >> @truncate(dom_index))) == 0) continue;
- var live_reg_it = block.live_registers.iterator();
+ var live_reg_it = dom_block.live_registers.iterator();
while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
_ => |live_vi| try live_vi.mat(isel),
.allocating => unreachable,
@@ -3211,8 +3345,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
},
.br => {
const br = air.data(air.inst_index).br;
- const block = isel.blocks.getPtr(br.block_inst).?;
- try block.branch(isel);
+ try isel.blocks.getPtr(br.block_inst).?.branch(isel);
if (isel.live_values.get(br.block_inst)) |dst_vi| try dst_vi.move(isel, br.operand);
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
@@ -3224,6 +3357,22 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try isel.emit(.brk(0xf000));
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+ .ret_addr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |addr_vi| unused: {
+ defer addr_vi.value.deref(isel);
+ const addr_ra = try addr_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.ldr(addr_ra.x(), .{ .unsigned_offset = .{ .base = .fp, .offset = 8 } }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .frame_addr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |addr_vi| unused: {
+ defer addr_vi.value.deref(isel);
+ const addr_ra = try addr_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.orr(addr_ra.x(), .xzr, .{ .register = .fp }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
.call => {
const pl_op = air.data(air.inst_index).pl_op;
const extra = isel.air.extraData(Air.Call, pl_op.payload);
@@ -3312,7 +3461,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
var param_part_it = passed_vi.parts(isel);
var arg_part_it = arg_vi.parts(isel);
if (arg_part_it.only()) |_| {
- try isel.values.ensureUnusedCapacity(isel.pt.zcu.gpa, param_part_it.remaining);
+ try isel.values.ensureUnusedCapacity(gpa, param_part_it.remaining);
arg_vi.setParts(isel, param_part_it.remaining);
while (param_part_it.next()) |param_part_vi| _ = arg_vi.addPart(
isel,
@@ -3659,7 +3808,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (air_tag) {
+ .name = switch (air_tag) {
else => unreachable,
.sqrt => switch (bits) {
else => unreachable,
@@ -3751,7 +3900,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (air_tag) {
+ .name = switch (air_tag) {
else => unreachable,
.sin => switch (bits) {
else => unreachable,
@@ -4239,7 +4388,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__cmphf2",
32 => "__cmpsf2",
@@ -4629,6 +4778,14 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try isel.emit(.nop());
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+ .dbg_inline_block => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.DbgInlineBlock, ty_pl.payload);
+ try isel.block(air.inst_index, ty_pl.ty.toType(), @ptrCast(
+ isel.air.extra.items[extra.end..][0..extra.data.body_len],
+ ));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
.dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => {
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
@@ -4724,7 +4881,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = "memcpy",
+ .name = "memcpy",
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -4816,10 +4973,8 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
const ptr_ty = isel.air.typeOf(bin_op.lhs, ip);
const ptr_info = ptr_ty.ptrInfo(zcu);
if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed store", .{});
- if (bin_op.rhs.toInterned()) |rhs_val| if (ip.isUndef(rhs_val)) {
- if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
- break :air_tag;
- };
+ if (bin_op.rhs.toInterned()) |rhs_val| if (ip.isUndef(rhs_val))
+ break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
const src_vi = try isel.use(bin_op.rhs);
const size = src_vi.size(isel);
@@ -4833,8 +4988,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
});
try ptr_mat.finish(isel);
- if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
- break :air_tag;
+ break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
else => {},
};
@@ -4843,7 +4997,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = "memcpy",
+ .name = "memcpy",
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -4906,7 +5060,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (dst_bits) {
+ .name = switch (dst_bits) {
else => unreachable,
16 => switch (src_bits) {
else => unreachable,
@@ -5060,6 +5214,108 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+ .intcast_safe => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_int_info = src_ty.intInfo(zcu);
+ const can_be_negative = dst_int_info.signedness == .signed and
+ src_int_info.signedness == .signed;
+ const panic_id: Zcu.SimplePanicId = panic_id: switch (dst_ty.zigTypeTag(zcu)) {
+ else => unreachable,
+ .int => .integer_out_of_bounds,
+ .@"enum" => {
+ if (!dst_ty.isNonexhaustiveEnum(zcu)) {
+ return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ break :panic_id .invalid_enum_value;
+ },
+ };
+ if (dst_ty.toIntern() == src_ty.toIntern()) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_int_info.bits <= 64 and src_int_info.bits <= 64) {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const dst_active_bits = dst_int_info.bits - @intFromBool(dst_int_info.signedness == .signed);
+ const src_active_bits = src_int_info.bits - @intFromBool(src_int_info.signedness == .signed);
+ if ((dst_int_info.signedness != .unsigned or src_int_info.signedness != .signed) and dst_active_bits >= src_active_bits) {
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (can_be_negative and dst_active_bits > 32 and src_active_bits <= 32)
+ .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(src_int_info.bits - 1),
+ })
+ else switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => .orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }),
+ 33...64 => .orr(dst_ra.x(), .xzr, .{ .register = src_mat.ra.x() }),
+ });
+ try src_mat.finish(isel);
+ } else {
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(panic_id);
+ try isel.emit(.@"b."(
+ .eq,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ if (can_be_negative) {
+ const src_mat = src_mat: {
+ const dst_lock = isel.lockReg(dst_ra);
+ defer dst_lock.unlock(isel);
+ break :src_mat try src_vi.matReg(isel);
+ };
+ try isel.emit(switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => .subs(.wzr, dst_ra.w(), .{ .register = src_mat.ra.w() }),
+ 33...64 => .subs(.xzr, dst_ra.x(), .{ .register = src_mat.ra.x() }),
+ });
+ try isel.emit(switch (@max(dst_int_info.bits, src_int_info.bits)) {
+ else => unreachable,
+ 1...32 => .sbfm(dst_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(dst_int_info.bits - 1),
+ }),
+ 33...64 => .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_int_info.bits - 1),
+ }),
+ });
+ try src_mat.finish(isel);
+ } else {
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(switch (@min(dst_int_info.bits, src_int_info.bits)) {
+ else => unreachable,
+ 1...32 => .orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }),
+ 33...64 => .orr(dst_ra.x(), .xzr, .{ .register = src_mat.ra.x() }),
+ });
+ const active_bits = @min(dst_active_bits, src_active_bits);
+ try isel.emit(switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => .ands(.wzr, src_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - active_bits),
+ .imms = @intCast(32 - active_bits - 1),
+ } }),
+ 33...64 => .ands(.xzr, src_mat.ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - active_bits),
+ .imms = @intCast(64 - active_bits - 1),
+ } }),
+ });
+ try src_mat.finish(isel);
+ }
+ }
+ } else return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
.trunc => |air_tag| {
if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
defer dst_vi.value.deref(isel);
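Note: in the narrowing unsigned path, `.intcast_safe` copies the value and then tests the bits that would be discarded, branching over a panic call when they are all clear. A Zig-level analogy with placeholder names (the actual lowering is the ANDS-immediate plus B.EQ above):

    // any discarded bit set triggers the panic path
    if ((src >> dst_bits) != 0) @panic("...");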
@@ -5832,7 +6088,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (dst_int_info.bits) {
+ .name = switch (dst_int_info.bits) {
else => unreachable,
1...32 => switch (dst_int_info.signedness) {
.signed => switch (src_bits) {
@@ -5972,7 +6228,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (src_int_info.bits) {
+ .name = switch (src_int_info.bits) {
else => unreachable,
1...32 => switch (src_int_info.signedness) {
.signed => switch (dst_bits) {
@@ -6055,14 +6311,20 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
- .memset => |air_tag| {
+ .memset, .memset_safe => |air_tag| {
const bin_op = air.data(air.inst_index).bin_op;
const dst_ty = isel.air.typeOf(bin_op.lhs, ip);
const dst_info = dst_ty.ptrInfo(zcu);
const fill_byte: union(enum) { constant: u8, value: Air.Inst.Ref } = fill_byte: {
- if (bin_op.rhs.toInterned()) |fill_val|
+ if (bin_op.rhs.toInterned()) |fill_val| {
+ if (ip.isUndef(fill_val)) switch (air_tag) {
+ else => unreachable,
+ .memset => break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag,
+ .memset_safe => break :fill_byte .{ .constant = 0xaa },
+ };
if (try isel.hasRepeatedByteRepr(.fromInterned(fill_val))) |fill_byte|
break :fill_byte .{ .constant = fill_byte };
+ }
switch (dst_ty.elemType2(zcu).abiSize(zcu)) {
0 => unreachable,
1 => break :fill_byte .{ .value = bin_op.rhs },
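Note: `.memset_safe` only diverges from `.memset` for an undefined fill value: rather than eliding the store, it fills with the 0xaa byte pattern used for undefined memory in safety-checked builds. Illustrative Zig:

    var buf: [8]u8 = undefined;
    @memset(&buf, undefined); // .memset lowering: store elided; .memset_safe lowering: every byte set to 0xaa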
@@ -6121,8 +6383,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
.c => unreachable,
}
- if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
- break :air_tag;
+ break :air_tag if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty) }),
}
@@ -6133,7 +6394,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = "memset",
+ .name = "memset",
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -6179,7 +6440,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = @tagName(air_tag),
+ .name = @tagName(air_tag),
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -6268,6 +6529,72 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+ .error_name => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |name_vi| unused: {
+ defer name_vi.value.deref(isel);
+ var ptr_part_it = name_vi.value.field(.slice_const_u8_sentinel_0, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const ptr_part_ra = try ptr_part_vi.?.defReg(isel);
+ var len_part_it = name_vi.value.field(.slice_const_u8_sentinel_0, 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ const len_part_ra = try len_part_vi.?.defReg(isel);
+ if (ptr_part_ra == null and len_part_ra == null) break :unused;
+
+ const un_op = air.data(air.inst_index).un_op;
+ const err_vi = try isel.use(un_op);
+ const err_mat = try err_vi.matReg(isel);
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ const start_ra, const end_ra = range_ras: {
+ const name_lock: RegLock = if (len_part_ra != null) if (ptr_part_ra) |name_ptr_ra|
+ isel.tryLockReg(name_ptr_ra)
+ else
+ .empty else .empty;
+ defer name_lock.unlock(isel);
+ break :range_ras .{ try isel.allocIntReg(), try isel.allocIntReg() };
+ };
+ defer {
+ isel.freeReg(start_ra);
+ isel.freeReg(end_ra);
+ }
+ if (len_part_ra) |name_len_ra| try isel.emit(.sub(
+ name_len_ra.w(),
+ end_ra.w(),
+ .{ .register = start_ra.w() },
+ ));
+ if (ptr_part_ra) |name_ptr_ra| try isel.emit(.add(
+ name_ptr_ra.x(),
+ ptr_ra.x(),
+ .{ .extended_register = .{
+ .register = start_ra.w(),
+ .extend = .{ .uxtw = 0 },
+ } },
+ ));
+ if (len_part_ra) |_| try isel.emit(.sub(end_ra.w(), end_ra.w(), .{ .immediate = 1 }));
+ try isel.emit(.ldp(start_ra.w(), end_ra.w(), .{ .base = start_ra.x() }));
+ try isel.emit(.add(start_ra.x(), ptr_ra.x(), .{ .extended_register = .{
+ .register = err_mat.ra.w(),
+ .extend = switch (zcu.errorSetBits()) {
+ else => unreachable,
+ 1...8 => .{ .uxtb = 2 },
+ 9...16 => .{ .uxth = 2 },
+ 17...32 => .{ .uxtw = 2 },
+ },
+ } }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ try err_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
.aggregate_init => {
if (isel.live_values.fetchRemove(air.inst_index)) |agg_vi| {
defer agg_vi.value.deref(isel);
@@ -6362,7 +6689,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = "memcpy",
+ .name = "memcpy",
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.bl(0));
@@ -6478,7 +6805,7 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
try call.prepareCallee(isel);
try isel.global_relocs.append(gpa, .{
- .global = switch (bits) {
+ .name = switch (bits) {
else => unreachable,
16 => "__fmah",
32 => "fmaf",
@@ -6559,6 +6886,32 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
+ .cmp_lt_errors_len => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |is_vi| unused: {
+ defer is_vi.value.deref(isel);
+ const is_ra = try is_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.csinc(is_ra.w(), .wzr, .wzr, .invert(.ls)));
+
+ const un_op = air.data(air.inst_index).un_op;
+ const err_vi = try isel.use(un_op);
+ const err_mat = try err_vi.matReg(isel);
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ try isel.emit(.subs(.wzr, err_mat.ra.w(), .{ .register = ptr_ra.w() }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.ldr(ptr_ra.w(), .{ .base = ptr_ra.x() }));
+ try isel.lazy_relocs.append(gpa, .{
+ .symbol = .{ .kind = .const_data, .ty = .anyerror_type },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ try err_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
.runtime_nav_ptr => {
if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| unused: {
defer ptr_vi.value.deref(isel);
@@ -6567,19 +6920,19 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
const ty_nav = air.data(air.inst_index).ty_nav;
if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
false => {
- try isel.nav_relocs.append(zcu.gpa, .{
+ try isel.nav_relocs.append(gpa, .{
.nav = ty_nav.nav,
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.adr(ptr_ra.x(), 0));
},
true => {
- try isel.nav_relocs.append(zcu.gpa, .{
+ try isel.nav_relocs.append(gpa, .{
.nav = ty_nav.nav,
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
- try isel.nav_relocs.append(zcu.gpa, .{
+ try isel.nav_relocs.append(gpa, .{
.nav = ty_nav.nav,
.reloc = .{ .label = @intCast(isel.instructions.items.len) },
});
@@ -6589,9 +6942,6 @@ pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
}
if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
},
- .add_safe,
- .sub_safe,
- .mul_safe,
.inferred_alloc,
.inferred_alloc_comptime,
.int_from_float_safe,
@@ -6822,6 +7172,9 @@ pub fn layout(
saves_len += 1;
saves_size += 8;
deferred_gr = null;
+ } else switch (@as(u1, @truncate(saved_gra_len))) {
+ 0 => {},
+ 1 => saves_size += 8,
}
save_ra = if (mod.strip) incoming.ngrn else CallAbiIterator.ngrn_start;
while (save_ra != if (have_va) CallAbiIterator.ngrn_end else incoming.ngrn) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
@@ -6844,42 +7197,42 @@ pub fn layout(
{
wip_mir_log.debug("{f}<prologue>:", .{nav.fqn.fmt(ip)});
var save_index: usize = 0;
- while (save_index < saves.len) {
- if (save_index + 2 <= saves.len and saves[save_index + 0].class == saves[save_index + 1].class and
- saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
- {
- try isel.emit(.stp(
- saves[save_index + 0].register,
- saves[save_index + 1].register,
- switch (saves[save_index + 0].offset) {
- 0 => .{ .pre_index = .{
- .base = .sp,
- .index = @intCast(-@as(i11, saves_size)),
- } },
- else => |offset| .{ .signed_offset = .{
- .base = .sp,
- .offset = @intCast(offset),
- } },
- },
- ));
- save_index += 2;
- } else {
- try isel.emit(.str(
- saves[save_index].register,
- switch (saves[save_index].offset) {
- 0 => .{ .pre_index = .{
- .base = .sp,
- .index = @intCast(-@as(i11, saves_size)),
- } },
- else => |offset| .{ .unsigned_offset = .{
- .base = .sp,
- .offset = @intCast(offset),
- } },
- },
- ));
- save_index += 1;
- }
- }
+ while (save_index < saves.len) if (save_index + 2 <= saves.len and
+ saves[save_index + 0].class == saves[save_index + 1].class and
+ saves[save_index + 0].size == saves[save_index + 1].size and
+ saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
+ {
+ try isel.emit(.stp(
+ saves[save_index + 0].register,
+ saves[save_index + 1].register,
+ switch (saves[save_index + 0].offset) {
+ 0 => .{ .pre_index = .{
+ .base = .sp,
+ .index = @intCast(-@as(i11, saves_size)),
+ } },
+ else => |offset| .{ .signed_offset = .{
+ .base = .sp,
+ .offset = @intCast(offset),
+ } },
+ },
+ ));
+ save_index += 2;
+ } else {
+ try isel.emit(.str(
+ saves[save_index].register,
+ switch (saves[save_index].offset) {
+ 0 => .{ .pre_index = .{
+ .base = .sp,
+ .index = @intCast(-@as(i11, saves_size)),
+ } },
+ else => |offset| .{ .unsigned_offset = .{
+ .base = .sp,
+ .offset = @intCast(offset),
+ } },
+ },
+ ));
+ save_index += 1;
+ };
try isel.emit(.add(.fp, .sp, .{ .immediate = frame_record_offset }));
const scratch_reg: Register = if (isel.stack_align == .@"16")
@@ -7053,11 +7406,43 @@ fn fmtConstant(isel: *Select, constant: Constant) @typeInfo(@TypeOf(Constant.fmt
return constant.fmtValue(isel.pt);
}
+fn block(
+ isel: *Select,
+ air_inst_index: Air.Inst.Index,
+ res_ty: ZigType,
+ air_body: []const Air.Inst.Index,
+) !void {
+ if (res_ty.toIntern() != .noreturn_type) {
+ isel.blocks.putAssumeCapacityNoClobber(air_inst_index, .{
+ .live_registers = isel.live_registers,
+ .target_label = @intCast(isel.instructions.items.len),
+ });
+ }
+ try isel.body(air_body);
+ if (res_ty.toIntern() != .noreturn_type) {
+ const block_entry = isel.blocks.pop().?;
+ assert(block_entry.key == air_inst_index);
+ if (isel.live_values.fetchRemove(air_inst_index)) |result_vi| result_vi.value.deref(isel);
+ }
+}
+
fn emit(isel: *Select, instruction: codegen.aarch64.encoding.Instruction) !void {
wip_mir_log.debug(" | {f}", .{instruction});
try isel.instructions.append(isel.pt.zcu.gpa, instruction);
}
+fn emitPanic(isel: *Select, panic_id: Zcu.SimplePanicId) !void {
+ const zcu = isel.pt.zcu;
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = switch (zcu.intern_pool.indexToKey(zcu.builtin_decl_values.get(panic_id.toBuiltin()))) {
+ else => unreachable,
+ inline .@"extern", .func => |func| func.owner_nav,
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+}
+
fn emitLiteral(isel: *Select, bytes: []const u8) !void {
const words: []align(1) const u32 = @ptrCast(bytes);
const literals = try isel.literals.addManyAsSlice(isel.pt.zcu.gpa, words.len);
@@ -8104,6 +8489,32 @@ pub const Value = struct {
}
}
+ const AddOrSubtractOptions = struct {
+ overflow: Overflow,
+
+ const Overflow = union(enum) {
+ @"unreachable",
+ panic: Zcu.SimplePanicId,
+ wrap,
+ ra: Register.Alias,
+
+ fn defCond(overflow: Overflow, isel: *Select, cond: codegen.aarch64.encoding.ConditionCode) !void {
+ switch (overflow) {
+ .@"unreachable" => unreachable,
+ .panic => |panic_id| {
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(panic_id);
+ try isel.emit(.@"b."(
+ cond.invert(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ },
+ .wrap => {},
+ .ra => |overflow_ra| try isel.emit(.csinc(overflow_ra.w(), .wzr, .wzr, cond.invert())),
+ }
+ }
+ };
+ };
fn addOrSubtract(
res_vi: Value.Index,
isel: *Select,
@@ -8111,19 +8522,21 @@ pub const Value = struct {
lhs_vi: Value.Index,
op: codegen.aarch64.encoding.Instruction.AddSubtractOp,
rhs_vi: Value.Index,
- opts: struct {
- wrap: bool,
- overflow_ra: Register.Alias = .zr,
- },
+ opts: AddOrSubtractOptions,
) !void {
- assert(opts.wrap or opts.overflow_ra == .zr);
const zcu = isel.pt.zcu;
if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(op), isel.fmtType(ty) });
const int_info = ty.intInfo(zcu);
if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(op), isel.fmtType(ty) });
var part_offset = res_vi.size(isel);
- var need_wrap = opts.wrap;
- var need_carry = opts.overflow_ra != .zr;
+ var need_wrap = switch (opts.overflow) {
+ .@"unreachable" => false,
+ .panic, .wrap, .ra => true,
+ };
+ var need_carry = switch (opts.overflow) {
+ .@"unreachable", .wrap => false,
+ .panic, .ra => true,
+ };
while (part_offset > 0) : (need_wrap = false) {
const part_size = @min(part_offset, 8);
part_offset -= part_size;
@@ -8133,48 +8546,87 @@ pub const Value = struct {
const unwrapped_res_part_ra = unwrapped_res_part_ra: {
if (!need_wrap) break :unwrapped_res_part_ra wrapped_res_part_ra;
if (int_info.bits % 32 == 0) {
- if (opts.overflow_ra != .zr) try isel.emit(.csinc(opts.overflow_ra.w(), .wzr, .wzr, .invert(switch (int_info.signedness) {
+ try opts.overflow.defCond(isel, switch (int_info.signedness) {
.signed => .vs,
.unsigned => switch (op) {
.add => .cs,
.sub => .cc,
},
- })));
+ });
break :unwrapped_res_part_ra wrapped_res_part_ra;
}
- const wrapped_part_ra, const unwrapped_part_ra = if (opts.overflow_ra != .zr) part_ra: {
- switch (op) {
- .add => {},
- .sub => switch (int_info.signedness) {
- .signed => {},
- .unsigned => {
- try isel.emit(.csinc(opts.overflow_ra.w(), .wzr, .wzr, .invert(.cc)));
- break :part_ra .{ wrapped_res_part_ra, wrapped_res_part_ra };
- },
+ need_carry = false;
+ const wrapped_part_ra, const unwrapped_part_ra = part_ra: switch (opts.overflow) {
+ .@"unreachable" => unreachable,
+ .panic, .ra => switch (int_info.signedness) {
+ .signed => {
+ try opts.overflow.defCond(isel, .ne);
+ const wrapped_part_ra = switch (wrapped_res_part_ra) {
+ else => |res_part_ra| res_part_ra,
+ .zr => try isel.allocIntReg(),
+ };
+ errdefer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
+ const unwrapped_part_ra = unwrapped_part_ra: {
+ const wrapped_res_part_lock: RegLock = switch (wrapped_res_part_ra) {
+ else => |res_part_ra| isel.lockReg(res_part_ra),
+ .zr => .empty,
+ };
+ defer wrapped_res_part_lock.unlock(isel);
+ break :unwrapped_part_ra try isel.allocIntReg();
+ };
+ errdefer isel.freeReg(unwrapped_part_ra);
+ switch (part_size) {
+ else => unreachable,
+ 1...4 => try isel.emit(.subs(.wzr, wrapped_part_ra.w(), .{ .register = unwrapped_part_ra.w() })),
+ 5...8 => try isel.emit(.subs(.xzr, wrapped_part_ra.x(), .{ .register = unwrapped_part_ra.x() })),
+ }
+ break :part_ra .{ wrapped_part_ra, unwrapped_part_ra };
},
- }
- try isel.emit(.csinc(opts.overflow_ra.w(), .wzr, .wzr, .invert(.ne)));
- const wrapped_part_ra = switch (wrapped_res_part_ra) {
- else => |res_part_ra| res_part_ra,
- .zr => try isel.allocIntReg(),
- };
- errdefer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
- const unwrapped_part_ra = unwrapped_part_ra: {
- const wrapped_res_part_lock: RegLock = switch (wrapped_res_part_ra) {
- else => |res_part_ra| isel.lockReg(res_part_ra),
- .zr => .empty,
- };
- defer wrapped_res_part_lock.unlock(isel);
- break :unwrapped_part_ra try isel.allocIntReg();
- };
- errdefer isel.freeReg(unwrapped_part_ra);
- switch (part_size) {
- else => unreachable,
- 1...4 => try isel.emit(.subs(.wzr, wrapped_part_ra.w(), .{ .register = unwrapped_part_ra.w() })),
- 5...8 => try isel.emit(.subs(.xzr, wrapped_part_ra.x(), .{ .register = unwrapped_part_ra.x() })),
- }
- break :part_ra .{ wrapped_part_ra, unwrapped_part_ra };
- } else .{ wrapped_res_part_ra, wrapped_res_part_ra };
+ .unsigned => {
+ const unwrapped_part_ra = unwrapped_part_ra: {
+ const wrapped_res_part_lock: RegLock = switch (wrapped_res_part_ra) {
+ else => |res_part_ra| isel.lockReg(res_part_ra),
+ .zr => .empty,
+ };
+ defer wrapped_res_part_lock.unlock(isel);
+ break :unwrapped_part_ra try isel.allocIntReg();
+ };
+ errdefer isel.freeReg(unwrapped_part_ra);
+ const bit: u6 = @truncate(int_info.bits);
+ switch (opts.overflow) {
+ .@"unreachable", .wrap => unreachable,
+ .panic => |panic_id| {
+ const skip_label = isel.instructions.items.len;
+ try isel.emitPanic(panic_id);
+ try isel.emit(.tbz(
+ switch (bit) {
+ 0, 32 => unreachable,
+ 1...31 => unwrapped_part_ra.w(),
+ 33...63 => unwrapped_part_ra.x(),
+ },
+ bit,
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ },
+ .ra => |overflow_ra| try isel.emit(switch (bit) {
+ 0, 32 => unreachable,
+ 1...31 => .ubfm(overflow_ra.w(), unwrapped_part_ra.w(), .{
+ .N = .word,
+ .immr = bit,
+ .imms = bit,
+ }),
+ 33...63 => .ubfm(overflow_ra.x(), unwrapped_part_ra.x(), .{
+ .N = .doubleword,
+ .immr = bit,
+ .imms = bit,
+ }),
+ }),
+ }
+ break :part_ra .{ wrapped_res_part_ra, unwrapped_part_ra };
+ },
+ },
+ .wrap => .{ wrapped_res_part_ra, wrapped_res_part_ra },
+ };
defer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
errdefer if (unwrapped_part_ra != wrapped_res_part_ra) isel.freeReg(unwrapped_part_ra);
if (wrapped_part_ra != .zr) try isel.emit(switch (part_size) {
@@ -8650,41 +9102,15 @@ pub const Value = struct {
expected_live_registers: *const LiveRegisters,
) !void {
try vi.liveIn(isel, src_ra, expected_live_registers);
- const offset_from_parent: i65, const parent_vi = vi.valueParent(isel);
+ const offset_from_parent, const parent_vi = vi.valueParent(isel);
switch (parent_vi.parent(isel)) {
.unallocated => {},
- .stack_slot => |stack_slot| {
- const offset = stack_slot.offset + offset_from_parent;
- try isel.emit(switch (vi.size(isel)) {
- else => unreachable,
- 1 => if (src_ra.isVector()) .str(src_ra.b(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }) else .strb(src_ra.w(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }),
- 2 => if (src_ra.isVector()) .str(src_ra.h(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }) else .strh(src_ra.w(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }),
- 4 => .str(if (src_ra.isVector()) src_ra.s() else src_ra.w(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }),
- 8 => .str(if (src_ra.isVector()) src_ra.d() else src_ra.x(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }),
- 16 => .str(src_ra.q(), .{ .unsigned_offset = .{
- .base = stack_slot.base.x(),
- .offset = @intCast(offset),
- } }),
- });
- },
+ .stack_slot => |stack_slot| if (stack_slot.base != Register.Alias.fp) try isel.storeReg(
+ src_ra,
+ vi.size(isel),
+ stack_slot.base,
+ @as(i65, stack_slot.offset) + offset_from_parent,
+ ),
else => unreachable,
}
try vi.spillReg(isel, src_ra, 0, expected_live_registers);
@@ -9631,14 +10057,18 @@ pub const Value = struct {
var base_ptr = ip.indexToKey(base).ptr;
const eu_ty = ip.indexToKey(base_ptr.ty).ptr_type.child;
const payload_ty = ip.indexToKey(eu_ty).error_union_type.payload_type;
- base_ptr.byte_offset += codegen.errUnionPayloadOffset(.fromInterned(payload_ty), zcu);
+ base_ptr.byte_offset += codegen.errUnionPayloadOffset(.fromInterned(payload_ty), zcu) + ptr.byte_offset;
+ continue :constant_key .{ .ptr = base_ptr };
+ },
+ .opt_payload => |base| {
+ var base_ptr = ip.indexToKey(base).ptr;
+ base_ptr.byte_offset += ptr.byte_offset;
continue :constant_key .{ .ptr = base_ptr };
},
- .opt_payload => |base| continue :constant_key .{ .ptr = ip.indexToKey(base).ptr },
.field => |field| {
var base_ptr = ip.indexToKey(field.base).ptr;
const agg_ty: ZigType = .fromInterned(ip.indexToKey(base_ptr.ty).ptr_type.child);
- base_ptr.byte_offset += agg_ty.structFieldOffset(@intCast(field.index), zcu);
+ base_ptr.byte_offset += agg_ty.structFieldOffset(@intCast(field.index), zcu) + ptr.byte_offset;
continue :constant_key .{ .ptr = base_ptr };
},
.comptime_alloc, .comptime_field, .arr_elem => unreachable,
src/codegen/aarch64.zig
@@ -47,6 +47,7 @@ pub fn generate(
.literals = .empty,
.nav_relocs = .empty,
.uav_relocs = .empty,
+ .lazy_relocs = .empty,
.global_relocs = .empty,
.literal_relocs = .empty,
@@ -101,8 +102,8 @@ pub fn generate(
};
switch (passed_vi.parent(&isel)) {
.unallocated => if (!mod.strip) {
- var part_it = arg_vi.parts(&isel);
- const first_passed_part_vi = part_it.next() orelse passed_vi;
+ var part_it = passed_vi.parts(&isel);
+ const first_passed_part_vi = part_it.next().?;
const hint_ra = first_passed_part_vi.hint(&isel).?;
passed_vi.setParent(&isel, .{ .stack_slot = if (hint_ra.isVector())
isel.va_list.__vr_top.withOffset(@as(i8, -16) *
@@ -167,6 +168,7 @@ pub fn generate(
.literals = &.{},
.nav_relocs = &.{},
.uav_relocs = &.{},
+ .lazy_relocs = &.{},
.global_relocs = &.{},
.literal_relocs = &.{},
};
@@ -174,6 +176,7 @@ pub fn generate(
mir.literals = try isel.literals.toOwnedSlice(gpa);
mir.nav_relocs = try isel.nav_relocs.toOwnedSlice(gpa);
mir.uav_relocs = try isel.uav_relocs.toOwnedSlice(gpa);
+ mir.lazy_relocs = try isel.lazy_relocs.toOwnedSlice(gpa);
mir.global_relocs = try isel.global_relocs.toOwnedSlice(gpa);
mir.literal_relocs = try isel.literal_relocs.toOwnedSlice(gpa);
return mir;
src/Compilation.zig
@@ -1816,10 +1816,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (options.skip_linker_dependencies) break :s .none;
const want = options.want_compiler_rt orelse is_exe_or_dyn_lib;
if (!want) break :s .none;
- if (have_zcu) {
+ if (have_zcu and target_util.canBuildLibCompilerRt(target, use_llvm, build_options.have_llvm and use_llvm)) {
if (output_mode == .Obj) break :s .zcu;
- if (target.ofmt == .coff and target_util.zigBackend(target, use_llvm) == .stage2_x86_64)
- break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
+ if (switch (target_util.zigBackend(target, use_llvm)) {
+ else => false,
+ .stage2_aarch64, .stage2_x86_64 => target.ofmt == .coff,
+ }) break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
}
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
@@ -1854,7 +1856,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
const want_ubsan_rt = options.want_ubsan_rt orelse (can_build_ubsan_rt and any_sanitize_c == .full and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;
- if (have_zcu) break :s .zcu;
+ if (have_zcu and target_util.canBuildLibUbsanRt(target, use_llvm, build_options.have_llvm and use_llvm)) break :s .zcu;
if (is_exe_or_dyn_lib) break :s .lib;
break :s .obj;
};
src/target.zig
@@ -351,7 +351,7 @@ pub fn defaultCompilerRtOptimizeMode(target: *const std.Target) std.builtin.Opti
}
}
-pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, comptime have_llvm: bool) bool {
+pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llvm: bool) bool {
switch (target.os.tag) {
.plan9 => return false,
else => {},
@@ -373,7 +373,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, comptime
};
}
-pub fn canBuildLibUbsanRt(target: *const std.Target, use_llvm: bool, comptime have_llvm: bool) bool {
+pub fn canBuildLibUbsanRt(target: *const std.Target, use_llvm: bool, have_llvm: bool) bool {
switch (target.cpu.arch) {
.spirv32, .spirv64 => return false,
// Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
@@ -382,6 +382,7 @@ pub fn canBuildLibUbsanRt(target: *const std.Target, use_llvm: bool, comptime ha
}
return switch (zigBackend(target, use_llvm)) {
.stage2_llvm => true,
+ .stage2_wasm => false,
.stage2_x86_64 => switch (target.ofmt) {
.elf, .macho => true,
else => have_llvm,
@@ -860,6 +861,7 @@ pub fn zigBackend(target: *const std.Target, use_llvm: bool) std.builtin.Compile
pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, comptime feature: Feature) bool {
return switch (feature) {
.panic_fn => switch (backend) {
+ .stage2_aarch64,
.stage2_c,
.stage2_llvm,
.stage2_x86_64,
test/behavior/error.zig
@@ -590,7 +590,6 @@ test "error union comptime caching" {
}
test "@errorName" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -605,7 +604,6 @@ fn gimmeItBroke() anyerror {
}
test "@errorName sentinel length matches slice length" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -883,7 +881,6 @@ test "catch within a function that calls no errorable functions" {
}
test "error from comptime string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/return_address.zig
@@ -6,7 +6,6 @@ fn retAddr() usize {
}
test "return address" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/cases/compile_errors/callconv_interrupt_on_unsupported_platform.zig
@@ -7,5 +7,5 @@ export fn entry3() callconv(.avr_interrupt) void {}
// target=aarch64-linux-none
//
// :1:30: error: calling convention 'x86_64_interrupt' only available on architectures 'x86_64'
-// :1:30: error: calling convention 'x86_interrupt' only available on architectures 'x86'
-// :1:30: error: calling convention 'avr_interrupt' only available on architectures 'avr'
+// :2:30: error: calling convention 'x86_interrupt' only available on architectures 'x86'
+// :3:30: error: calling convention 'avr_interrupt' only available on architectures 'avr'
test/cases/compile_errors/error_set_membership.zig
@@ -25,7 +25,7 @@ pub fn main() Error!void {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :23:29: error: expected type 'error{InvalidCharacter}', found '@typeInfo(@typeInfo(@TypeOf(tmp.fooey)).@"fn".return_type.?).error_union.error_set'
// :23:29: note: 'error.InvalidDirection' not a member of destination error set
test/cases/compile_errors/function_ptr_alignment.zig
@@ -10,7 +10,7 @@ comptime {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :8:41: error: expected type '*align(2) const fn () void', found '*const fn () void'
// :8:41: note: pointer alignment '1' cannot cast into pointer alignment '2'
test/cases/compile_errors/issue_15572_break_on_inline_while.zig
@@ -15,6 +15,6 @@ pub fn main() void {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :9:28: error: incompatible types: 'builtin.Type.EnumField' and 'void'
test/cases/compile_errors/switch_on_non_err_union.zig
@@ -6,6 +6,6 @@ pub fn main() void {
// error
// backend=stage2
-// target=native
+// target=x86_64-linux
//
// :2:23: error: expected error union type, found 'bool'
test/cases/safety/@alignCast misaligned.zig
@@ -22,4 +22,4 @@ fn foo(bytes: []u8) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@enumFromInt - no matching tag value.zig
@@ -23,4 +23,4 @@ fn baz(_: Foo) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@enumFromInt truncated bits - exhaustive.zig
@@ -20,4 +20,4 @@ pub fn main() u8 {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@enumFromInt truncated bits - nonexhaustive.zig
@@ -20,4 +20,4 @@ pub fn main() u8 {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@errorCast error not present in destination.zig
@@ -18,4 +18,4 @@ fn foo(set1: Set1) Set2 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@errorCast error union casted to disjoint set.zig
@@ -17,4 +17,4 @@ fn foo() anyerror!i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intCast to u0.zig
@@ -19,4 +19,4 @@ fn bar(one: u1, not_zero: i32) void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - i0 max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - i0 min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - signed max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - signed min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - u0 max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - u0 min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - unsigned min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - vector max.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - boundary case - vector min.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - negative out of range.zig
@@ -17,4 +17,4 @@ fn bar(a: f32) i8 {
fn baz(_: i8) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - negative to unsigned.zig
@@ -17,4 +17,4 @@ fn bar(a: f32) u8 {
fn baz(_: u8) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@intFromFloat cannot fit - positive out of range.zig
@@ -17,4 +17,4 @@ fn bar(a: f32) u8 {
fn baz(_: u8) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@ptrFromInt address zero to non-optional byte-aligned pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@ptrFromInt address zero to non-optional pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@ptrFromInt with misaligned address.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/@tagName on corrupted enum value.zig
@@ -23,4 +23,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/@tagName on corrupted union value.zig
@@ -24,4 +24,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/array slice sentinel mismatch vector.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/array slice sentinel mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/bad union field access.zig
@@ -24,4 +24,4 @@ fn bar(f: *Foo) void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/calling panic.zig
@@ -13,4 +13,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/cast []u8 to bigger slice of wrong size.zig
@@ -18,4 +18,4 @@ fn widenSlice(slice: []align(1) const u8) []align(1) const i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/cast integer to global error and no code matches.zig
@@ -16,4 +16,4 @@ fn bar(x: u16) anyerror {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/empty slice with sentinel out of bounds.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/exact division failure - vectors.zig
@@ -20,4 +20,4 @@ fn divExact(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/exact division failure.zig
@@ -18,4 +18,4 @@ fn divExact(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/for_len_mismatch.zig
@@ -22,4 +22,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/for_len_mismatch_three.zig
@@ -21,4 +21,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/ignored expression integer overflow.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/integer addition overflow.zig
@@ -20,4 +20,4 @@ fn add(a: u16, b: u16) u16 {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/integer division by zero - vectors.zig
@@ -19,4 +19,4 @@ fn div0(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/integer division by zero.zig
@@ -17,4 +17,4 @@ fn div0(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/integer multiplication overflow.zig
@@ -18,4 +18,4 @@ fn mul(a: u16, b: u16) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/integer negation overflow.zig
@@ -18,4 +18,4 @@ fn neg(a: i16) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/integer subtraction overflow.zig
@@ -18,4 +18,4 @@ fn sub(a: u16, b: u16) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memcpy_alias.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memcpy_len_mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memmove_len_mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memset_array_undefined_bytes.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memset_array_undefined_large.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memset_slice_undefined_bytes.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/memset_slice_undefined_large.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/modrem by zero.zig
@@ -17,4 +17,4 @@ fn div0(a: u32, b: u32) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/modulus by zero.zig
@@ -17,4 +17,4 @@ fn mod0(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/noreturn returned.zig
@@ -20,4 +20,4 @@ pub fn main() void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/optional unwrap operator on C pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/optional unwrap operator on null pointer.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/optional_empty_error_set.zig
@@ -19,4 +19,4 @@ fn foo() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/out of bounds array slice by length.zig
@@ -17,4 +17,4 @@ fn foo(a: u32) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/out of bounds slice access.zig
@@ -18,4 +18,4 @@ fn bar(a: []const i32) i32 {
fn baz(_: i32) void {}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/pointer casting null to non-optional pointer.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/pointer casting to null function pointer.zig
@@ -20,4 +20,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/pointer slice sentinel mismatch.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/remainder division by zero.zig
@@ -17,4 +17,4 @@ fn rem0(a: i32, b: i32) i32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/shift left by huge amount.zig
@@ -19,4 +19,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/shift right by huge amount.zig
@@ -19,4 +19,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/signed integer division overflow - vectors.zig
@@ -20,4 +20,4 @@ fn div(a: @Vector(4, i16), b: @Vector(4, i16)) @Vector(4, i16) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/signed integer division overflow.zig
@@ -18,4 +18,4 @@ fn div(a: i16, b: i16) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/signed integer not fitting in cast to unsigned integer - widening.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/signed integer not fitting in cast to unsigned integer.zig
@@ -17,4 +17,4 @@ fn unsigned_cast(x: i32) u32 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/signed shift left overflow.zig
@@ -18,4 +18,4 @@ fn shl(a: i16, b: u4) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/signed shift right overflow.zig
@@ -18,4 +18,4 @@ fn shr(a: i16, b: u4) i16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/signed-unsigned vector cast.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/slice by length sentinel mismatch on lhs.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice by length sentinel mismatch on rhs.zig
@@ -15,4 +15,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice sentinel mismatch - floats.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/slice sentinel mismatch - optional pointers.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice slice sentinel mismatch.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice start index greater than end index.zig
@@ -21,4 +21,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice with sentinel out of bounds - runtime len.zig
@@ -20,4 +20,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice with sentinel out of bounds.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice_cast_change_len_0.zig
@@ -24,4 +24,4 @@ const std = @import("std");
// run
// backend=stage2,llvm
-// target=x86_64-linux
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice_cast_change_len_1.zig
@@ -24,4 +24,4 @@ const std = @import("std");
// run
// backend=stage2,llvm
-// target=x86_64-linux
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slice_cast_change_len_2.zig
@@ -24,4 +24,4 @@ const std = @import("std");
// run
// backend=stage2,llvm
-// target=x86_64-linux
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slicing null C pointer - runtime len.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/slicing null C pointer.zig
@@ -17,4 +17,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/switch else on corrupt enum value - one prong.zig
@@ -21,4 +21,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/switch else on corrupt enum value - union.zig
@@ -26,4 +26,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/switch else on corrupt enum value.zig
@@ -20,4 +20,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/switch on corrupted enum value.zig
@@ -24,4 +24,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/switch on corrupted union value.zig
@@ -24,4 +24,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/truncating vector cast.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/unreachable.zig
@@ -12,4 +12,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/unsigned integer not fitting in cast to signed integer - same bit count.zig
@@ -16,4 +16,4 @@ pub fn main() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/unsigned shift left overflow.zig
@@ -18,4 +18,4 @@ fn shl(a: u16, b: u4) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/unsigned shift right overflow.zig
@@ -18,4 +18,4 @@ fn shr(a: u16, b: u4) u16 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/unsigned-signed vector cast.zig
@@ -18,4 +18,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/unwrap error switch.zig
@@ -18,4 +18,4 @@ fn bar() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/unwrap error.zig
@@ -16,4 +16,4 @@ fn bar() !void {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/value does not fit in shortening cast - u0.zig
@@ -18,4 +18,4 @@ fn shorten_cast(x: u8) u0 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/value does not fit in shortening cast.zig
@@ -18,4 +18,4 @@ fn shorten_cast(x: i32) i8 {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/safety/vector integer addition overflow.zig
@@ -19,4 +19,4 @@ fn add(a: @Vector(4, i32), b: @Vector(4, i32)) @Vector(4, i32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/vector integer multiplication overflow.zig
@@ -19,4 +19,4 @@ fn mul(a: @Vector(4, u8), b: @Vector(4, u8)) @Vector(4, u8) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/vector integer negation overflow.zig
@@ -19,4 +19,4 @@ fn neg(a: @Vector(4, i16)) @Vector(4, i16) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/vector integer subtraction overflow.zig
@@ -19,4 +19,4 @@ fn sub(a: @Vector(4, u32), b: @Vector(4, u32)) @Vector(4, u32) {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/cases/safety/zero casted to error.zig
@@ -16,4 +16,4 @@ fn bar(x: u16) anyerror {
}
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/array_in_anon_struct.zig
@@ -19,4 +19,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux,aarch64-linux
test/cases/pic_freestanding.zig
@@ -1,7 +1,7 @@
const builtin = @import("builtin");
const std = @import("std");
-fn _start() callconv(.naked) void {}
+pub fn _start() callconv(.naked) void {}
comptime {
@export(&_start, .{ .name = if (builtin.cpu.arch.isMIPS()) "__start" else "_start" });
test/cases/taking_pointer_of_global_tagged_union.zig
@@ -23,4 +23,4 @@ pub fn main() !void {
// run
// backend=stage2,llvm
-// target=native
+// target=x86_64-linux
test/src/Cases.zig
@@ -436,7 +436,7 @@ fn addFromDirInner(
const target = &resolved_target.result;
for (backends) |backend| {
if (backend == .stage2 and
- target.cpu.arch != .wasm32 and target.cpu.arch != .x86_64 and target.cpu.arch != .spirv64)
+ target.cpu.arch != .aarch64 and target.cpu.arch != .wasm32 and target.cpu.arch != .x86_64 and target.cpu.arch != .spirv64)
{
// Other backends don't support new liveness format
continue;
@@ -447,10 +447,6 @@ fn addFromDirInner(
// Rosetta has issues with ZLD
continue;
}
- if (backend == .stage2 and target.ofmt == .coff) {
- // COFF linker has bitrotted
- continue;
- }
const next = ctx.cases.items.len;
try ctx.cases.append(.{