Commit 5060ab99c9
Changed files (167)
lib
compiler
compiler_rt
std
src
arch
codegen
link
test
behavior
c_import
lib/compiler/test_runner.zig
@@ -16,6 +16,7 @@ var stdin_buffer: [4096]u8 = undefined;
var stdout_buffer: [4096]u8 = undefined;
const crippled = switch (builtin.zig_backend) {
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> true,
@@ -287,13 +288,14 @@ pub fn log(
/// work-in-progress backends can handle it.
pub fn mainSimple() anyerror!void {
@disableInstrumentation();
- // is the backend capable of printing to stderr?
- const enable_print = switch (builtin.zig_backend) {
+ // is the backend capable of calling `std.fs.File.writeAll`?
+ const enable_write = switch (builtin.zig_backend) {
+ .stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
- // is the backend capable of using std.fmt.format to print a summary at the end?
- const print_summary = switch (builtin.zig_backend) {
- .stage2_riscv64 => true,
+ // is the backend capable of calling `std.Io.Writer.print`?
+ const enable_print = switch (builtin.zig_backend) {
+ .stage2_aarch64, .stage2_riscv64 => true,
else => false,
};
@@ -302,34 +304,31 @@ pub fn mainSimple() anyerror!void {
var failed: u64 = 0;
// we don't want to bring in File and Writer if the backend doesn't support it
- const stderr = if (comptime enable_print) std.fs.File.stderr() else {};
+ const stdout = if (enable_write) std.fs.File.stdout() else {};
for (builtin.test_functions) |test_fn| {
+ if (enable_write) {
+ stdout.writeAll(test_fn.name) catch {};
+ stdout.writeAll("... ") catch {};
+ }
if (test_fn.func()) |_| {
- if (enable_print) {
- stderr.writeAll(test_fn.name) catch {};
- stderr.writeAll("... ") catch {};
- stderr.writeAll("PASS\n") catch {};
- }
+ if (enable_write) stdout.writeAll("PASS\n") catch {};
} else |err| {
- if (enable_print) {
- stderr.writeAll(test_fn.name) catch {};
- stderr.writeAll("... ") catch {};
- }
if (err != error.SkipZigTest) {
- if (enable_print) stderr.writeAll("FAIL\n") catch {};
+ if (enable_write) stdout.writeAll("FAIL\n") catch {};
failed += 1;
- if (!enable_print) return err;
+ if (!enable_write) return err;
continue;
}
- if (enable_print) stderr.writeAll("SKIP\n") catch {};
+ if (enable_write) stdout.writeAll("SKIP\n") catch {};
skipped += 1;
continue;
}
passed += 1;
}
- if (enable_print and print_summary) {
- stderr.deprecatedWriter().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
+ if (enable_print) {
+ var stdout_writer = stdout.writer(&.{});
+ stdout_writer.interface.print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
}
if (failed != 0) std.process.exit(1);
}
lib/compiler_rt/addo.zig
@@ -1,6 +1,4 @@
const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("./common.zig");
pub const panic = @import("common.zig").panic;
@@ -16,7 +14,7 @@ comptime {
// - addoXi4_generic as default
inline fn addoXi4_generic(comptime ST: type, a: ST, b: ST, overflow: *c_int) ST {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
overflow.* = 0;
const sum: ST = a +% b;
// Hackers Delight: section Overflow Detection, subsection Signed Add/Subtract
lib/compiler_rt/addoti4_test.zig
@@ -1,4 +1,5 @@
const addv = @import("addo.zig");
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@@ -23,6 +24,8 @@ fn simple_addoti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "addoti4" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;
lib/compiler_rt/clear_cache.zig
@@ -97,8 +97,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
.nbytes = end - start,
.whichcache = 3, // ICACHE | DCACHE
};
- asm volatile (
- \\ syscall
+ asm volatile ("syscall"
:
: [_] "{$2}" (165), // nr = SYS_sysarch
[_] "{$4}" (0), // op = MIPS_CACHEFLUSH
@@ -116,11 +115,8 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
} else if (arm64 and !apple) {
// Get Cache Type Info.
// TODO memoize this?
- var ctr_el0: u64 = 0;
- asm volatile (
- \\mrs %[x], ctr_el0
- \\
- : [x] "=r" (ctr_el0),
+ const ctr_el0 = asm volatile ("mrs %[ctr_el0], ctr_el0"
+ : [ctr_el0] "=r" (-> u64),
);
// The DC and IC instructions must use 64-bit registers so we don't use
// uintptr_t in case this runs in an IPL32 environment.
@@ -187,9 +183,7 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
exportIt();
} else if (os == .linux and loongarch) {
// See: https://github.com/llvm/llvm-project/blob/cf54cae26b65fc3201eff7200ffb9b0c9e8f9a13/compiler-rt/lib/builtins/clear_cache.c#L94-L95
- asm volatile (
- \\ ibar 0
- );
+ asm volatile ("ibar 0");
exportIt();
}
lib/compiler_rt/cmp.zig
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
lib/compiler_rt/common.zig
@@ -102,9 +102,14 @@ pub const gnu_f16_abi = switch (builtin.cpu.arch) {
pub const want_sparc_abi = builtin.cpu.arch.isSPARC();
+pub const test_safety = switch (builtin.zig_backend) {
+ .stage2_aarch64 => false,
+ else => builtin.is_test,
+};
+
// Avoid dragging in the runtime safety mechanisms into this .o file, unless
// we're trying to test compiler-rt.
-pub const panic = if (builtin.is_test) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
+pub const panic = if (test_safety) std.debug.FullPanic(std.debug.defaultPanic) else std.debug.no_panic;
/// This seems to mostly correspond to `clang::TargetInfo::HasFloat16`.
pub fn F16T(comptime OtherType: type) type {
lib/compiler_rt/comparedf2_test.zig
@@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const __eqdf2 = @import("./cmpdf2.zig").__eqdf2;
const __ledf2 = @import("./cmpdf2.zig").__ledf2;
lib/compiler_rt/comparesf2_test.zig
@@ -4,7 +4,6 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const __eqsf2 = @import("./cmpsf2.zig").__eqsf2;
const __lesf2 = @import("./cmpsf2.zig").__lesf2;
lib/compiler_rt/count0bits.zig
@@ -1,6 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const common = @import("common.zig");
pub const panic = common.panic;
lib/compiler_rt/divdf3.zig
@@ -5,7 +5,6 @@
const std = @import("std");
const builtin = @import("builtin");
const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
const common = @import("common.zig");
const normalize = common.normalize;
lib/compiler_rt/divmodei4.zig
@@ -34,7 +34,7 @@ fn divmod(q: ?[]u32, r: ?[]u32, u: []u32, v: []u32) !void {
}
pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -43,7 +43,7 @@ pub fn __divei4(q_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) vo
}
pub fn __modei4(r_p: [*]u8, u_p: [*]u8, v_p: [*]u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
lib/compiler_rt/fixint_test.zig
@@ -1,4 +1,3 @@
-const is_test = @import("builtin").is_test;
const std = @import("std");
const math = std.math;
const testing = std.testing;
lib/compiler_rt/int.zig
@@ -6,7 +6,6 @@ const testing = std.testing;
const maxInt = std.math.maxInt;
const minInt = std.math.minInt;
const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
const common = @import("common.zig");
const udivmod = @import("udivmod.zig").udivmod;
const __divti3 = @import("divti3.zig").__divti3;
lib/compiler_rt/memcpy.zig
@@ -11,7 +11,7 @@ comptime {
.visibility = common.visibility,
};
- if (builtin.mode == .ReleaseSmall)
+ if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memcpySmall, export_options)
else
@export(&memcpyFast, export_options);
@@ -195,6 +195,8 @@ inline fn copyRange4(
}
test "memcpy" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
fn testFunc(comptime copy_func: anytype) !void {
const max_len = 1024;
lib/compiler_rt/memmove.zig
@@ -14,7 +14,7 @@ comptime {
.visibility = common.visibility,
};
- if (builtin.mode == .ReleaseSmall)
+ if (builtin.mode == .ReleaseSmall or builtin.zig_backend == .stage2_aarch64)
@export(&memmoveSmall, export_options)
else
@export(&memmoveFast, export_options);
@@ -39,7 +39,7 @@ fn memmoveSmall(opt_dest: ?[*]u8, opt_src: ?[*]const u8, len: usize) callconv(.c
}
fn memmoveFast(dest: ?[*]u8, src: ?[*]u8, len: usize) callconv(.c) ?[*]u8 {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const small_limit = @max(2 * @sizeOf(Element), @sizeOf(Element));
if (copySmallLength(small_limit, dest.?, src.?, len)) return dest;
@@ -79,7 +79,7 @@ inline fn copyLessThan16(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
if (len < 4) {
if (len == 0) return;
const b = len / 2;
@@ -100,7 +100,7 @@ inline fn copy16ToSmallLimit(
src: [*]const u8,
len: usize,
) bool {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
inline for (2..(std.math.log2(small_limit) + 1) / 2 + 1) |p| {
const limit = 1 << (2 * p);
if (len < limit) {
@@ -119,7 +119,7 @@ inline fn copyRange4(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
comptime assert(std.math.isPowerOfTwo(copy_len));
assert(len >= copy_len);
assert(len < 4 * copy_len);
@@ -147,7 +147,7 @@ inline fn copyForwards(
src: [*]const u8,
len: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
assert(len >= 2 * @sizeOf(Element));
const head = src[0..@sizeOf(Element)].*;
@@ -181,7 +181,7 @@ inline fn copyBlocks(
src: anytype,
max_bytes: usize,
) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const T = @typeInfo(@TypeOf(dest)).pointer.child;
comptime assert(T == @typeInfo(@TypeOf(src)).pointer.child);
@@ -217,6 +217,8 @@ inline fn copyBackwards(
}
test memmoveFast {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const max_len = 1024;
var buffer: [max_len + @alignOf(Element) - 1]u8 = undefined;
for (&buffer, 0..) |*b, i| {
lib/compiler_rt/mulf3.zig
@@ -6,7 +6,7 @@ const common = @import("./common.zig");
/// Ported from:
/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
pub inline fn mulf3(comptime T: type, a: T, b: T) T {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(T).float.bits;
const significandBits = math.floatMantissaBits(T);
const fractionalBits = math.floatFractionalBits(T);
@@ -163,7 +163,7 @@ pub inline fn mulf3(comptime T: type, a: T, b: T) T {
///
/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const typeWidth = @typeInfo(Z).int.bits;
var inexact = false;
if (count < typeWidth) {
lib/compiler_rt/rem_pio2_large.zig
@@ -251,7 +251,7 @@ const PIo2 = [_]f64{
/// compiler will convert from decimal to binary accurately enough
/// to produce the hexadecimal values shown.
///
-pub fn rem_pio2_large(x: []f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
+pub fn rem_pio2_large(x: []const f64, y: []f64, e0: i32, nx: i32, prec: usize) i32 {
var jz: i32 = undefined;
var jx: i32 = undefined;
var jv: i32 = undefined;
lib/compiler_rt/stack_probe.zig
@@ -4,7 +4,6 @@ const common = @import("common.zig");
const os_tag = builtin.os.tag;
const arch = builtin.cpu.arch;
const abi = builtin.abi;
-const is_test = builtin.is_test;
pub const panic = common.panic;
lib/compiler_rt/suboti4_test.zig
@@ -1,4 +1,5 @@
const subo = @import("subo.zig");
+const builtin = @import("builtin");
const std = @import("std");
const testing = std.testing;
const math = std.math;
@@ -27,6 +28,8 @@ pub fn simple_suboti4(a: i128, b: i128, overflow: *c_int) i128 {
}
test "suboti3" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const min: i128 = math.minInt(i128);
const max: i128 = math.maxInt(i128);
var i: i128 = 1;
lib/compiler_rt/udivmod.zig
@@ -1,8 +1,8 @@
const std = @import("std");
const builtin = @import("builtin");
-const is_test = builtin.is_test;
const Log2Int = std.math.Log2Int;
-const HalveInt = @import("common.zig").HalveInt;
+const common = @import("common.zig");
+const HalveInt = common.HalveInt;
const lo = switch (builtin.cpu.arch.endian()) {
.big => 1,
@@ -14,7 +14,7 @@ const hi = 1 - lo;
// Returns U / v_ and sets r = U % v_.
fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
const HalfT = HalveInt(T, false).HalfT;
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
var v = v_;
const b = @as(T, 1) << (@bitSizeOf(T) / 2);
@@ -70,7 +70,7 @@ fn divwide_generic(comptime T: type, _u1: T, _u0: T, v_: T, r: *T) T {
}
fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
if (T == u64 and builtin.target.cpu.arch == .x86_64 and builtin.target.os.tag != .windows) {
var rem: T = undefined;
const quo = asm (
@@ -90,7 +90,7 @@ fn divwide(comptime T: type, _u1: T, _u0: T, v: T, r: *T) T {
// Returns a_ / b_ and sets maybe_rem = a_ % b.
pub fn udivmod(comptime T: type, a_: T, b_: T, maybe_rem: ?*T) T {
- @setRuntimeSafety(is_test);
+ @setRuntimeSafety(common.test_safety);
const HalfT = HalveInt(T, false).HalfT;
const SignedT = std.meta.Int(.signed, @bitSizeOf(T));
lib/compiler_rt/udivmodei4.zig
@@ -113,7 +113,7 @@ pub fn divmod(q: ?[]u32, r: ?[]u32, u: []const u32, v: []const u32) !void {
}
pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const q: []u32 = @ptrCast(@alignCast(q_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -122,7 +122,7 @@ pub fn __udivei4(q_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) callconv(.c) void {
- @setRuntimeSafety(builtin.is_test);
+ @setRuntimeSafety(common.test_safety);
const byte_size = std.zig.target.intByteSize(&builtin.target, @intCast(bits));
const r: []u32 = @ptrCast(@alignCast(r_p[0..byte_size]));
const u: []const u32 = @ptrCast(@alignCast(u_p[0..byte_size]));
@@ -131,6 +131,7 @@ pub fn __umodei4(r_p: [*]u8, u_p: [*]const u8, v_p: [*]const u8, bits: usize) ca
}
test "__udivei4/__umodei4" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
lib/std/fs/File.zig
@@ -1554,7 +1554,10 @@ pub const Writer = struct {
return .{
.vtable = &.{
.drain = drain,
- .sendFile = sendFile,
+ .sendFile = switch (builtin.zig_backend) {
+ else => sendFile,
+ .stage2_aarch64 => std.io.Writer.unimplementedSendFile,
+ },
},
.buffer = buffer,
};
lib/std/Io/Writer.zig
@@ -2239,6 +2239,10 @@ pub const Discarding = struct {
pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
if (File.Handle == void) return error.Unimplemented;
+ switch (builtin.zig_backend) {
+ else => {},
+ .stage2_aarch64 => return error.Unimplemented,
+ }
const d: *Discarding = @alignCast(@fieldParentPtr("writer", w));
d.count += w.end;
w.end = 0;
lib/std/math/big/int_test.zig
@@ -2774,7 +2774,6 @@ test "bitNotWrap more than two limbs" {
// This test requires int sizes greater than 128 bits.
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
// LLVM: unexpected runtime library name: __umodei4
if (builtin.zig_backend == .stage2_llvm and comptime builtin.target.cpu.arch.isWasm()) return error.SkipZigTest; // TODO
lib/std/math/float.zig
@@ -4,8 +4,6 @@ const assert = std.debug.assert;
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
-pub const Sign = enum(u1) { positive, negative };
-
pub fn FloatRepr(comptime Float: type) type {
const fractional_bits = floatFractionalBits(Float);
const exponent_bits = floatExponentBits(Float);
@@ -14,7 +12,7 @@ pub fn FloatRepr(comptime Float: type) type {
mantissa: StoredMantissa,
exponent: BiasedExponent,
- sign: Sign,
+ sign: std.math.Sign,
pub const StoredMantissa = @Type(.{ .int = .{
.signedness = .unsigned,
@@ -69,7 +67,7 @@ pub fn FloatRepr(comptime Float: type) type {
/// This currently truncates denormal values, which needs to be fixed before this can be used to
/// produce a rounded value.
- pub fn reconstruct(normalized: Normalized, sign: Sign) Float {
+ pub fn reconstruct(normalized: Normalized, sign: std.math.Sign) Float {
if (normalized.exponent > BiasedExponent.max_normal.unbias()) return @bitCast(Repr{
.mantissa = 0,
.exponent = .infinite,
lib/std/math/log10.zig
@@ -132,7 +132,6 @@ inline fn less_than_5(x: u32) u32 {
test log10_int {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm and comptime builtin.target.cpu.arch.isWasm()) return error.SkipZigTest; // TODO
lib/std/os/linux.zig
@@ -503,7 +503,6 @@ pub var elf_aux_maybe: ?[*]std.elf.Auxv = null;
/// Whether an external or internal getauxval implementation is used.
const extern_getauxval = switch (builtin.zig_backend) {
// Calling extern functions is not yet supported with these backends
- .stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
lib/std/builtin.zig
@@ -772,7 +772,7 @@ pub const Endian = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
-pub const Signedness = enum {
+pub const Signedness = enum(u1) {
signed,
unsigned,
};
@@ -894,7 +894,10 @@ pub const VaList = switch (builtin.cpu.arch) {
.aarch64, .aarch64_be => switch (builtin.os.tag) {
.windows => *u8,
.ios, .macos, .tvos, .watchos, .visionos => *u8,
- else => @compileError("disabled due to miscompilations"), // VaListAarch64,
+ else => switch (builtin.zig_backend) {
+ .stage2_aarch64 => VaListAarch64,
+ else => @compileError("disabled due to miscompilations"),
+ },
},
.arm, .armeb, .thumb, .thumbeb => switch (builtin.os.tag) {
.ios, .macos, .tvos, .watchos, .visionos => *u8,
lib/std/elf.zig
@@ -2001,7 +2001,7 @@ pub const R_AARCH64 = enum(u32) {
TLSLE_LDST64_TPREL_LO12 = 558,
/// Likewise; no check.
TLSLE_LDST64_TPREL_LO12_NC = 559,
- /// PC-rel. load immediate 20:2.
+ /// PC-rel. load immediate 20:2.
TLSDESC_LD_PREL19 = 560,
/// PC-rel. ADR immediate 20:0.
TLSDESC_ADR_PREL21 = 561,
lib/std/math.zig
@@ -45,6 +45,7 @@ pub const rad_per_deg = 0.017453292519943295769236907684886127134428718885417254
/// 180.0/pi
pub const deg_per_rad = 57.295779513082320876798154814105170332405472466564321549160243861;
+pub const Sign = enum(u1) { positive, negative };
pub const FloatRepr = float.FloatRepr;
pub const floatExponentBits = float.floatExponentBits;
pub const floatMantissaBits = float.floatMantissaBits;
@@ -594,27 +595,30 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// Shifts left. Overflowed bits are truncated.
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
+ const is_shl = shift_amt >= 0;
const abs_shift_amt = @abs(shift_amt);
-
- const casted_shift_amt = blk: {
- if (@typeInfo(T) == .vector) {
- const C = @typeInfo(T).vector.child;
- const len = @typeInfo(T).vector.len;
- if (abs_shift_amt >= @typeInfo(C).int.bits) return @splat(0);
- break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
- } else {
- if (abs_shift_amt >= @typeInfo(T).int.bits) return 0;
- break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
- }
+ const casted_shift_amt = casted_shift_amt: switch (@typeInfo(T)) {
+ .int => |info| {
+ if (abs_shift_amt < info.bits) break :casted_shift_amt @as(
+ Log2Int(T),
+ @intCast(abs_shift_amt),
+ );
+ if (info.signedness == .unsigned or is_shl) return 0;
+ return a >> (info.bits - 1);
+ },
+ .vector => |info| {
+ const Child = info.child;
+ const child_info = @typeInfo(Child).int;
+ if (abs_shift_amt < child_info.bits) break :casted_shift_amt @as(
+ @Vector(info.len, Log2Int(Child)),
+ @splat(@as(Log2Int(Child), @intCast(abs_shift_amt))),
+ );
+ if (child_info.signedness == .unsigned or is_shl) return @splat(0);
+ return a >> @splat(child_info.bits - 1);
+ },
+ else => comptime unreachable,
};
-
- if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).int.signedness == .signed) {
- if (shift_amt < 0) {
- return a >> casted_shift_amt;
- }
- }
-
- return a << casted_shift_amt;
+ return if (is_shl) a << casted_shift_amt else a >> casted_shift_amt;
}
test shl {
@@ -629,32 +633,40 @@ test shl {
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) << 1);
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) >> 1);
try testing.expect(shl(@Vector(1, u32), @Vector(1, u32){42}, 33)[0] == 0);
+
+ try testing.expect(shl(i8, -1, -100) == -1);
+ try testing.expect(shl(i8, -1, 100) == 0);
+ try testing.expect(@reduce(.And, shl(@Vector(2, i8), .{ -1, 1 }, -100) == @Vector(2, i8){ -1, 0 }));
+ try testing.expect(@reduce(.And, shl(@Vector(2, i8), .{ -1, 1 }, 100) == @Vector(2, i8){ 0, 0 }));
}
/// Shifts right. Overflowed bits are truncated.
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
+ const is_shl = shift_amt < 0;
const abs_shift_amt = @abs(shift_amt);
-
- const casted_shift_amt = blk: {
- if (@typeInfo(T) == .vector) {
- const C = @typeInfo(T).vector.child;
- const len = @typeInfo(T).vector.len;
- if (abs_shift_amt >= @typeInfo(C).int.bits) return @splat(0);
- break :blk @as(@Vector(len, Log2Int(C)), @splat(@as(Log2Int(C), @intCast(abs_shift_amt))));
- } else {
- if (abs_shift_amt >= @typeInfo(T).int.bits) return 0;
- break :blk @as(Log2Int(T), @intCast(abs_shift_amt));
- }
+ const casted_shift_amt = casted_shift_amt: switch (@typeInfo(T)) {
+ .int => |info| {
+ if (abs_shift_amt < info.bits) break :casted_shift_amt @as(
+ Log2Int(T),
+ @intCast(abs_shift_amt),
+ );
+ if (info.signedness == .unsigned or is_shl) return 0;
+ return a >> (info.bits - 1);
+ },
+ .vector => |info| {
+ const Child = info.child;
+ const child_info = @typeInfo(Child).int;
+ if (abs_shift_amt < child_info.bits) break :casted_shift_amt @as(
+ @Vector(info.len, Log2Int(Child)),
+ @splat(@as(Log2Int(Child), @intCast(abs_shift_amt))),
+ );
+ if (child_info.signedness == .unsigned or is_shl) return @splat(0);
+ return a >> @splat(child_info.bits - 1);
+ },
+ else => comptime unreachable,
};
-
- if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).int.signedness == .signed) {
- if (shift_amt < 0) {
- return a << casted_shift_amt;
- }
- }
-
- return a >> casted_shift_amt;
+ return if (is_shl) a << casted_shift_amt else a >> casted_shift_amt;
}
test shr {
@@ -669,6 +681,11 @@ test shr {
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, @as(usize, 1))[0] == @as(u32, 42) >> 1);
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, @as(isize, -1))[0] == @as(u32, 42) << 1);
try testing.expect(shr(@Vector(1, u32), @Vector(1, u32){42}, 33)[0] == 0);
+
+ try testing.expect(shr(i8, -1, -100) == 0);
+ try testing.expect(shr(i8, -1, 100) == -1);
+ try testing.expect(@reduce(.And, shr(@Vector(2, i8), .{ -1, 1 }, -100) == @Vector(2, i8){ 0, 0 }));
+ try testing.expect(@reduce(.And, shr(@Vector(2, i8), .{ -1, 1 }, 100) == @Vector(2, i8){ -1, 0 }));
}
/// Rotates right. Only unsigned values can be rotated. Negative shift
lib/std/mem.zig
@@ -676,6 +676,7 @@ test lessThan {
const eqlBytes_allowed = switch (builtin.zig_backend) {
// These backends don't support vectors yet.
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> false,
@@ -4482,7 +4483,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
);
asm volatile (""
:
- : [val2] "r" (val2),
+ : [_] "r" (val2),
);
} else doNotOptimizeAway(&val);
},
@@ -4490,7 +4491,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
if ((t.float.bits == 32 or t.float.bits == 64) and builtin.zig_backend != .stage2_c) {
asm volatile (""
:
- : [val] "rm" (val),
+ : [_] "rm" (val),
);
} else doNotOptimizeAway(&val);
},
@@ -4500,7 +4501,7 @@ pub fn doNotOptimizeAway(val: anytype) void {
} else {
asm volatile (""
:
- : [val] "m" (val),
+ : [_] "m" (val),
: .{ .memory = true });
}
},
lib/std/Progress.zig
@@ -408,6 +408,9 @@ pub const have_ipc = switch (builtin.os.tag) {
const noop_impl = builtin.single_threaded or switch (builtin.os.tag) {
.wasi, .freestanding => true,
else => false,
+} or switch (builtin.zig_backend) {
+ .stage2_aarch64 => true,
+ else => false,
};
/// Initializes a global Progress instance.
@@ -754,7 +757,7 @@ fn appendTreeSymbol(symbol: TreeSymbol, buf: []u8, start_i: usize) usize {
}
fn clearWrittenWithEscapeCodes() anyerror!void {
- if (!global_progress.need_clear) return;
+ if (noop_impl or !global_progress.need_clear) return;
global_progress.need_clear = false;
try write(clear);
lib/std/start.zig
@@ -101,17 +101,11 @@ comptime {
// Simplified start code for stage2 until it supports more language features ///
fn main2() callconv(.c) c_int {
- root.main();
- return 0;
+ return callMain();
}
fn _start2() callconv(.withStackAlign(.c, 1)) noreturn {
- callMain2();
-}
-
-fn callMain2() noreturn {
- root.main();
- exit2(0);
+ std.posix.exit(callMain());
}
fn spirvMain2() callconv(.kernel) void {
@@ -119,51 +113,7 @@ fn spirvMain2() callconv(.kernel) void {
}
fn wWinMainCRTStartup2() callconv(.c) noreturn {
- root.main();
- exit2(0);
-}
-
-fn exit2(code: usize) noreturn {
- switch (native_os) {
- .linux => switch (builtin.cpu.arch) {
- .x86_64 => {
- asm volatile ("syscall"
- :
- : [number] "{rax}" (231),
- [arg1] "{rdi}" (code),
- : .{ .rcx = true, .r11 = true, .memory = true });
- },
- .arm => {
- asm volatile ("svc #0"
- :
- : [number] "{r7}" (1),
- [arg1] "{r0}" (code),
- : .{ .memory = true });
- },
- .aarch64 => {
- asm volatile ("svc #0"
- :
- : [number] "{x8}" (93),
- [arg1] "{x0}" (code),
- : .{ .memory = true });
- },
- .sparc64 => {
- asm volatile ("ta 0x6d"
- :
- : [number] "{g1}" (1),
- [arg1] "{o0}" (code),
- : .{ .o0 = true, .o1 = true, .o2 = true, .o3 = true, .o4 = true, .o5 = true, .o6 = true, .o7 = true, .memory = true });
- },
- else => @compileError("TODO"),
- },
- // exits(0)
- .plan9 => std.os.plan9.exits(null),
- .windows => {
- std.os.windows.ntdll.RtlExitUserProcess(@truncate(code));
- },
- else => @compileError("TODO"),
- }
- unreachable;
+ std.posix.exit(callMain());
}
////////////////////////////////////////////////////////////////////////////////
@@ -676,10 +626,11 @@ pub inline fn callMain() u8 {
const result = root.main() catch |err| {
switch (builtin.zig_backend) {
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
=> {
- std.debug.print("error: failed with error\n", .{});
+ _ = std.posix.write(std.posix.STDERR_FILENO, "error: failed with error\n") catch {};
return 1;
},
else => {},
lib/std/testing.zig
@@ -33,6 +33,7 @@ pub var log_level = std.log.Level.warn;
// Disable printing in tests for simple backends.
pub const backend_can_print = switch (builtin.zig_backend) {
+ .stage2_aarch64,
.stage2_powerpc,
.stage2_riscv64,
.stage2_spirv,
lib/compiler_rt.zig
@@ -240,7 +240,7 @@ comptime {
_ = @import("compiler_rt/udivmodti4.zig");
// extra
- _ = @import("compiler_rt/os_version_check.zig");
+ if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/os_version_check.zig");
_ = @import("compiler_rt/emutls.zig");
_ = @import("compiler_rt/arm.zig");
_ = @import("compiler_rt/aulldiv.zig");
@@ -249,12 +249,12 @@ comptime {
_ = @import("compiler_rt/hexagon.zig");
if (@import("builtin").object_format != .c) {
- _ = @import("compiler_rt/atomics.zig");
+ if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/atomics.zig");
_ = @import("compiler_rt/stack_probe.zig");
// macOS has these functions inside libSystem.
if (builtin.cpu.arch.isAARCH64() and !builtin.os.tag.isDarwin()) {
- _ = @import("compiler_rt/aarch64_outline_atomics.zig");
+ if (builtin.zig_backend != .stage2_aarch64) _ = @import("compiler_rt/aarch64_outline_atomics.zig");
}
_ = @import("compiler_rt/memcpy.zig");
src/arch/aarch64/bits.zig
@@ -1,2063 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const assert = std.debug.assert;
-const testing = std.testing;
-
-/// Disjoint sets of registers. Every register must belong to
-/// exactly one register class.
-pub const RegisterClass = enum {
- general_purpose,
- stack_pointer,
- floating_point,
-};
-
-/// Registers in the AArch64 instruction set
-pub const Register = enum(u8) {
- // zig fmt: off
- // 64-bit general-purpose registers
- x0, x1, x2, x3, x4, x5, x6, x7,
- x8, x9, x10, x11, x12, x13, x14, x15,
- x16, x17, x18, x19, x20, x21, x22, x23,
- x24, x25, x26, x27, x28, x29, x30, xzr,
-
- // 32-bit general-purpose registers
- w0, w1, w2, w3, w4, w5, w6, w7,
- w8, w9, w10, w11, w12, w13, w14, w15,
- w16, w17, w18, w19, w20, w21, w22, w23,
- w24, w25, w26, w27, w28, w29, w30, wzr,
-
- // Stack pointer
- sp, wsp,
-
- // 128-bit floating-point registers
- q0, q1, q2, q3, q4, q5, q6, q7,
- q8, q9, q10, q11, q12, q13, q14, q15,
- q16, q17, q18, q19, q20, q21, q22, q23,
- q24, q25, q26, q27, q28, q29, q30, q31,
-
- // 64-bit floating-point registers
- d0, d1, d2, d3, d4, d5, d6, d7,
- d8, d9, d10, d11, d12, d13, d14, d15,
- d16, d17, d18, d19, d20, d21, d22, d23,
- d24, d25, d26, d27, d28, d29, d30, d31,
-
- // 32-bit floating-point registers
- s0, s1, s2, s3, s4, s5, s6, s7,
- s8, s9, s10, s11, s12, s13, s14, s15,
- s16, s17, s18, s19, s20, s21, s22, s23,
- s24, s25, s26, s27, s28, s29, s30, s31,
-
- // 16-bit floating-point registers
- h0, h1, h2, h3, h4, h5, h6, h7,
- h8, h9, h10, h11, h12, h13, h14, h15,
- h16, h17, h18, h19, h20, h21, h22, h23,
- h24, h25, h26, h27, h28, h29, h30, h31,
-
- // 8-bit floating-point registers
- b0, b1, b2, b3, b4, b5, b6, b7,
- b8, b9, b10, b11, b12, b13, b14, b15,
- b16, b17, b18, b19, b20, b21, b22, b23,
- b24, b25, b26, b27, b28, b29, b30, b31,
- // zig fmt: on
-
- pub fn class(self: Register) RegisterClass {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => .general_purpose,
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => .general_purpose,
-
- @intFromEnum(Register.sp) => .stack_pointer,
- @intFromEnum(Register.wsp) => .stack_pointer,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => .floating_point,
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => .floating_point,
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => .floating_point,
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => .floating_point,
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => .floating_point,
- else => unreachable,
- };
- }
-
- pub fn id(self: Register) u6 {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
-
- @intFromEnum(Register.sp) => 32,
- @intFromEnum(Register.wsp) => 32,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0) + 33)),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0) + 33)),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0) + 33)),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0) + 33)),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u6, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0) + 33)),
- else => unreachable,
- };
- }
-
- pub fn enc(self: Register) u5 {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.x0))),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.w0))),
-
- @intFromEnum(Register.sp) => 31,
- @intFromEnum(Register.wsp) => 31,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.q0))),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.d0))),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.s0))),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.h0))),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(u5, @intCast(@intFromEnum(self) - @intFromEnum(Register.b0))),
- else => unreachable,
- };
- }
-
- /// Returns the bit-width of the register.
- pub fn size(self: Register) u8 {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => 64,
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => 32,
-
- @intFromEnum(Register.sp) => 64,
- @intFromEnum(Register.wsp) => 32,
-
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => 128,
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => 64,
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => 32,
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => 16,
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => 8,
- else => unreachable,
- };
- }
-
- /// Convert from a general-purpose register to its 64 bit alias.
- pub fn toX(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.x0)),
- ),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.x0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a general-purpose register to its 32 bit alias.
- pub fn toW(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.x0)...@intFromEnum(Register.xzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.x0) + @intFromEnum(Register.w0)),
- ),
- @intFromEnum(Register.w0)...@intFromEnum(Register.wzr) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.w0) + @intFromEnum(Register.w0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 128 bit alias.
- pub fn toQ(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.q0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.q0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 64 bit alias.
- pub fn toD(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.d0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.d0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 32 bit alias.
- pub fn toS(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.s0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.s0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 16 bit alias.
- pub fn toH(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.h0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.h0)),
- ),
- else => unreachable,
- };
- }
-
- /// Convert from a floating-point register to its 8 bit alias.
- pub fn toB(self: Register) Register {
- return switch (@intFromEnum(self)) {
- @intFromEnum(Register.q0)...@intFromEnum(Register.q31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.q0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.d0)...@intFromEnum(Register.d31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.d0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.s0)...@intFromEnum(Register.s31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.s0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.h0)...@intFromEnum(Register.h31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.h0) + @intFromEnum(Register.b0)),
- ),
- @intFromEnum(Register.b0)...@intFromEnum(Register.b31) => @as(
- Register,
- @enumFromInt(@intFromEnum(self) - @intFromEnum(Register.b0) + @intFromEnum(Register.b0)),
- ),
- else => unreachable,
- };
- }
-
- pub fn dwarfNum(self: Register) u5 {
- return self.enc();
- }
-};
-
-test "Register.enc" {
- try testing.expectEqual(@as(u5, 0), Register.x0.enc());
- try testing.expectEqual(@as(u5, 0), Register.w0.enc());
-
- try testing.expectEqual(@as(u5, 31), Register.xzr.enc());
- try testing.expectEqual(@as(u5, 31), Register.wzr.enc());
-
- try testing.expectEqual(@as(u5, 31), Register.sp.enc());
- try testing.expectEqual(@as(u5, 31), Register.sp.enc());
-}
-
-test "Register.size" {
- try testing.expectEqual(@as(u8, 64), Register.x19.size());
- try testing.expectEqual(@as(u8, 32), Register.w3.size());
-}
-
-test "Register.toX/toW" {
- try testing.expectEqual(Register.x0, Register.w0.toX());
- try testing.expectEqual(Register.x0, Register.x0.toX());
-
- try testing.expectEqual(Register.w3, Register.w3.toW());
- try testing.expectEqual(Register.w3, Register.x3.toW());
-}
-
-/// Represents an instruction in the AArch64 instruction set
-pub const Instruction = union(enum) {
- move_wide_immediate: packed struct {
- rd: u5,
- imm16: u16,
- hw: u2,
- fixed: u6 = 0b100101,
- opc: u2,
- sf: u1,
- },
- pc_relative_address: packed struct {
- rd: u5,
- immhi: u19,
- fixed: u5 = 0b10000,
- immlo: u2,
- op: u1,
- },
- load_store_register: packed struct {
- rt: u5,
- rn: u5,
- offset: u12,
- opc: u2,
- op1: u2,
- v: u1,
- fixed: u3 = 0b111,
- size: u2,
- },
- load_store_register_pair: packed struct {
- rt1: u5,
- rn: u5,
- rt2: u5,
- imm7: u7,
- load: u1,
- encoding: u2,
- fixed: u5 = 0b101_0_0,
- opc: u2,
- },
- load_literal: packed struct {
- rt: u5,
- imm19: u19,
- fixed: u6 = 0b011_0_00,
- opc: u2,
- },
- exception_generation: packed struct {
- ll: u2,
- op2: u3,
- imm16: u16,
- opc: u3,
- fixed: u8 = 0b1101_0100,
- },
- unconditional_branch_register: packed struct {
- op4: u5,
- rn: u5,
- op3: u6,
- op2: u5,
- opc: u4,
- fixed: u7 = 0b1101_011,
- },
- unconditional_branch_immediate: packed struct {
- imm26: u26,
- fixed: u5 = 0b00101,
- op: u1,
- },
- no_operation: packed struct {
- fixed: u32 = 0b1101010100_0_00_011_0010_0000_000_11111,
- },
- logical_shifted_register: packed struct {
- rd: u5,
- rn: u5,
- imm6: u6,
- rm: u5,
- n: u1,
- shift: u2,
- fixed: u5 = 0b01010,
- opc: u2,
- sf: u1,
- },
- add_subtract_immediate: packed struct {
- rd: u5,
- rn: u5,
- imm12: u12,
- sh: u1,
- fixed: u6 = 0b100010,
- s: u1,
- op: u1,
- sf: u1,
- },
- logical_immediate: packed struct {
- rd: u5,
- rn: u5,
- imms: u6,
- immr: u6,
- n: u1,
- fixed: u6 = 0b100100,
- opc: u2,
- sf: u1,
- },
- bitfield: packed struct {
- rd: u5,
- rn: u5,
- imms: u6,
- immr: u6,
- n: u1,
- fixed: u6 = 0b100110,
- opc: u2,
- sf: u1,
- },
- add_subtract_shifted_register: packed struct {
- rd: u5,
- rn: u5,
- imm6: u6,
- rm: u5,
- fixed_1: u1 = 0b0,
- shift: u2,
- fixed_2: u5 = 0b01011,
- s: u1,
- op: u1,
- sf: u1,
- },
- add_subtract_extended_register: packed struct {
- rd: u5,
- rn: u5,
- imm3: u3,
- option: u3,
- rm: u5,
- fixed: u8 = 0b01011_00_1,
- s: u1,
- op: u1,
- sf: u1,
- },
- conditional_branch: struct {
- cond: u4,
- o0: u1,
- imm19: u19,
- o1: u1,
- fixed: u7 = 0b0101010,
- },
- compare_and_branch: struct {
- rt: u5,
- imm19: u19,
- op: u1,
- fixed: u6 = 0b011010,
- sf: u1,
- },
- conditional_select: struct {
- rd: u5,
- rn: u5,
- op2: u2,
- cond: u4,
- rm: u5,
- fixed: u8 = 0b11010100,
- s: u1,
- op: u1,
- sf: u1,
- },
- data_processing_3_source: packed struct {
- rd: u5,
- rn: u5,
- ra: u5,
- o0: u1,
- rm: u5,
- op31: u3,
- fixed: u5 = 0b11011,
- op54: u2,
- sf: u1,
- },
- data_processing_2_source: packed struct {
- rd: u5,
- rn: u5,
- opcode: u6,
- rm: u5,
- fixed_1: u8 = 0b11010110,
- s: u1,
- fixed_2: u1 = 0b0,
- sf: u1,
- },
-
- pub const Condition = enum(u4) {
- /// Integer: Equal
- /// Floating point: Equal
- eq,
- /// Integer: Not equal
- /// Floating point: Not equal or unordered
- ne,
- /// Integer: Carry set
- /// Floating point: Greater than, equal, or unordered
- cs,
- /// Integer: Carry clear
- /// Floating point: Less than
- cc,
- /// Integer: Minus, negative
- /// Floating point: Less than
- mi,
- /// Integer: Plus, positive or zero
- /// Floating point: Greater than, equal, or unordered
- pl,
- /// Integer: Overflow
- /// Floating point: Unordered
- vs,
- /// Integer: No overflow
- /// Floating point: Ordered
- vc,
- /// Integer: Unsigned higher
- /// Floating point: Greater than, or unordered
- hi,
- /// Integer: Unsigned lower or same
- /// Floating point: Less than or equal
- ls,
- /// Integer: Signed greater than or equal
- /// Floating point: Greater than or equal
- ge,
- /// Integer: Signed less than
- /// Floating point: Less than, or unordered
- lt,
- /// Integer: Signed greater than
- /// Floating point: Greater than
- gt,
- /// Integer: Signed less than or equal
- /// Floating point: Less than, equal, or unordered
- le,
- /// Integer: Always
- /// Floating point: Always
- al,
- /// Integer: Always
- /// Floating point: Always
- nv,
-
- /// Converts a std.math.CompareOperator into a condition flag,
- /// i.e. returns the condition that is true iff the result of the
- /// comparison is true. Assumes signed comparison
- pub fn fromCompareOperatorSigned(op: std.math.CompareOperator) Condition {
- return switch (op) {
- .gte => .ge,
- .gt => .gt,
- .neq => .ne,
- .lt => .lt,
- .lte => .le,
- .eq => .eq,
- };
- }
-
- /// Converts a std.math.CompareOperator into a condition flag,
- /// i.e. returns the condition that is true iff the result of the
- /// comparison is true. Assumes unsigned comparison
- pub fn fromCompareOperatorUnsigned(op: std.math.CompareOperator) Condition {
- return switch (op) {
- .gte => .cs,
- .gt => .hi,
- .neq => .ne,
- .lt => .cc,
- .lte => .ls,
- .eq => .eq,
- };
- }
-
- /// Returns the condition which is true iff the given condition is
- /// false (if such a condition exists)
- pub fn negate(cond: Condition) Condition {
- return switch (cond) {
- .eq => .ne,
- .ne => .eq,
- .cs => .cc,
- .cc => .cs,
- .mi => .pl,
- .pl => .mi,
- .vs => .vc,
- .vc => .vs,
- .hi => .ls,
- .ls => .hi,
- .ge => .lt,
- .lt => .ge,
- .gt => .le,
- .le => .gt,
- .al => unreachable,
- .nv => unreachable,
- };
- }
- };
-
- pub fn toU32(self: Instruction) u32 {
- return switch (self) {
- .move_wide_immediate => |v| @as(u32, @bitCast(v)),
- .pc_relative_address => |v| @as(u32, @bitCast(v)),
- .load_store_register => |v| @as(u32, @bitCast(v)),
- .load_store_register_pair => |v| @as(u32, @bitCast(v)),
- .load_literal => |v| @as(u32, @bitCast(v)),
- .exception_generation => |v| @as(u32, @bitCast(v)),
- .unconditional_branch_register => |v| @as(u32, @bitCast(v)),
- .unconditional_branch_immediate => |v| @as(u32, @bitCast(v)),
- .no_operation => |v| @as(u32, @bitCast(v)),
- .logical_shifted_register => |v| @as(u32, @bitCast(v)),
- .add_subtract_immediate => |v| @as(u32, @bitCast(v)),
- .logical_immediate => |v| @as(u32, @bitCast(v)),
- .bitfield => |v| @as(u32, @bitCast(v)),
- .add_subtract_shifted_register => |v| @as(u32, @bitCast(v)),
- .add_subtract_extended_register => |v| @as(u32, @bitCast(v)),
- // TODO once packed structs work, this can be refactored
- .conditional_branch => |v| @as(u32, v.cond) | (@as(u32, v.o0) << 4) | (@as(u32, v.imm19) << 5) | (@as(u32, v.o1) << 24) | (@as(u32, v.fixed) << 25),
- .compare_and_branch => |v| @as(u32, v.rt) | (@as(u32, v.imm19) << 5) | (@as(u32, v.op) << 24) | (@as(u32, v.fixed) << 25) | (@as(u32, v.sf) << 31),
- .conditional_select => |v| @as(u32, v.rd) | @as(u32, v.rn) << 5 | @as(u32, v.op2) << 10 | @as(u32, v.cond) << 12 | @as(u32, v.rm) << 16 | @as(u32, v.fixed) << 21 | @as(u32, v.s) << 29 | @as(u32, v.op) << 30 | @as(u32, v.sf) << 31,
- .data_processing_3_source => |v| @as(u32, @bitCast(v)),
- .data_processing_2_source => |v| @as(u32, @bitCast(v)),
- };
- }
-
- fn moveWideImmediate(
- opc: u2,
- rd: Register,
- imm16: u16,
- shift: u6,
- ) Instruction {
- assert(shift % 16 == 0);
- assert(!(rd.size() == 32 and shift > 16));
- assert(!(rd.size() == 64 and shift > 48));
-
- return Instruction{
- .move_wide_immediate = .{
- .rd = rd.enc(),
- .imm16 = imm16,
- .hw = @as(u2, @intCast(shift / 16)),
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0,
- 64 => 1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn pcRelativeAddress(rd: Register, imm21: i21, op: u1) Instruction {
- assert(rd.size() == 64);
- const imm21_u = @as(u21, @bitCast(imm21));
- return Instruction{
- .pc_relative_address = .{
- .rd = rd.enc(),
- .immlo = @as(u2, @truncate(imm21_u)),
- .immhi = @as(u19, @truncate(imm21_u >> 2)),
- .op = op,
- },
- };
- }
-
- pub const LoadStoreOffsetImmediate = union(enum) {
- post_index: i9,
- pre_index: i9,
- unsigned: u12,
- };
-
- pub const LoadStoreOffsetRegister = struct {
- rm: u5,
- shift: union(enum) {
- uxtw: u2,
- lsl: u2,
- sxtw: u2,
- sxtx: u2,
- },
- };
-
- /// Represents the offset operand of a load or store instruction.
- /// Data can be loaded from memory with either an immediate offset
- /// or an offset that is stored in some register.
- pub const LoadStoreOffset = union(enum) {
- immediate: LoadStoreOffsetImmediate,
- register: LoadStoreOffsetRegister,
-
- pub const none = LoadStoreOffset{
- .immediate = .{ .unsigned = 0 },
- };
-
- pub fn toU12(self: LoadStoreOffset) u12 {
- return switch (self) {
- .immediate => |imm_type| switch (imm_type) {
- .post_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 1,
- .pre_index => |v| (@as(u12, @intCast(@as(u9, @bitCast(v)))) << 2) + 3,
- .unsigned => |v| v,
- },
- .register => |r| switch (r.shift) {
- .uxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 16 + 2050,
- .lsl => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 24 + 2050,
- .sxtw => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 48 + 2050,
- .sxtx => |v| (@as(u12, @intCast(r.rm)) << 6) + (@as(u12, @intCast(v)) << 2) + 56 + 2050,
- },
- };
- }
-
- pub fn imm(offset: u12) LoadStoreOffset {
- return .{
- .immediate = .{ .unsigned = offset },
- };
- }
-
- pub fn imm_post_index(offset: i9) LoadStoreOffset {
- return .{
- .immediate = .{ .post_index = offset },
- };
- }
-
- pub fn imm_pre_index(offset: i9) LoadStoreOffset {
- return .{
- .immediate = .{ .pre_index = offset },
- };
- }
-
- pub fn reg(rm: Register) LoadStoreOffset {
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .lsl = 0,
- },
- },
- };
- }
-
- pub fn reg_uxtw(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 32 and (shift == 0 or shift == 2));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .uxtw = shift,
- },
- },
- };
- }
-
- pub fn reg_lsl(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 64 and (shift == 0 or shift == 3));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .lsl = shift,
- },
- },
- };
- }
-
- pub fn reg_sxtw(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 32 and (shift == 0 or shift == 2));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .sxtw = shift,
- },
- },
- };
- }
-
- pub fn reg_sxtx(rm: Register, shift: u2) LoadStoreOffset {
- assert(rm.size() == 64 and (shift == 0 or shift == 3));
- return .{
- .register = .{
- .rm = rm.enc(),
- .shift = .{
- .sxtx = shift,
- },
- },
- };
- }
- };
-
- /// Which kind of load/store to perform
- const LoadStoreVariant = enum {
- /// 32 bits or 64 bits
- str,
- /// 8 bits, zero-extended
- strb,
- /// 16 bits, zero-extended
- strh,
- /// 32 bits or 64 bits
- ldr,
- /// 8 bits, zero-extended
- ldrb,
- /// 16 bits, zero-extended
- ldrh,
- /// 8 bits, sign extended
- ldrsb,
- /// 16 bits, sign extended
- ldrsh,
- /// 32 bits, sign extended
- ldrsw,
- };
-
- fn loadStoreRegister(
- rt: Register,
- rn: Register,
- offset: LoadStoreOffset,
- variant: LoadStoreVariant,
- ) Instruction {
- assert(rn.size() == 64);
- assert(rn.id() != Register.xzr.id());
-
- const off = offset.toU12();
-
- const op1: u2 = blk: {
- switch (offset) {
- .immediate => |imm| switch (imm) {
- .unsigned => break :blk 0b01,
- else => {},
- },
- else => {},
- }
- break :blk 0b00;
- };
-
- const opc: u2 = blk: {
- switch (variant) {
- .ldr, .ldrh, .ldrb => break :blk 0b01,
- .str, .strh, .strb => break :blk 0b00,
- .ldrsb,
- .ldrsh,
- => switch (rt.size()) {
- 32 => break :blk 0b11,
- 64 => break :blk 0b10,
- else => unreachable, // unexpected register size
- },
- .ldrsw => break :blk 0b10,
- }
- };
-
- const size: u2 = blk: {
- switch (variant) {
- .ldr, .str => switch (rt.size()) {
- 32 => break :blk 0b10,
- 64 => break :blk 0b11,
- else => unreachable, // unexpected register size
- },
- .ldrsw => break :blk 0b10,
- .ldrh, .ldrsh, .strh => break :blk 0b01,
- .ldrb, .ldrsb, .strb => break :blk 0b00,
- }
- };
-
- return Instruction{
- .load_store_register = .{
- .rt = rt.enc(),
- .rn = rn.enc(),
- .offset = off,
- .opc = opc,
- .op1 = op1,
- .v = 0,
- .size = size,
- },
- };
- }
-
- fn loadStoreRegisterPair(
- rt1: Register,
- rt2: Register,
- rn: Register,
- offset: i9,
- encoding: u2,
- load: bool,
- ) Instruction {
- assert(rn.size() == 64);
- assert(rn.id() != Register.xzr.id());
-
- switch (rt1.size()) {
- 32 => {
- assert(-256 <= offset and offset <= 252);
- const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 2))));
- return Instruction{
- .load_store_register_pair = .{
- .rt1 = rt1.enc(),
- .rn = rn.enc(),
- .rt2 = rt2.enc(),
- .imm7 = imm7,
- .load = @intFromBool(load),
- .encoding = encoding,
- .opc = 0b00,
- },
- };
- },
- 64 => {
- assert(-512 <= offset and offset <= 504);
- const imm7 = @as(u7, @truncate(@as(u9, @bitCast(offset >> 3))));
- return Instruction{
- .load_store_register_pair = .{
- .rt1 = rt1.enc(),
- .rn = rn.enc(),
- .rt2 = rt2.enc(),
- .imm7 = imm7,
- .load = @intFromBool(load),
- .encoding = encoding,
- .opc = 0b10,
- },
- };
- },
- else => unreachable, // unexpected register size
- }
- }
-
- fn loadLiteral(rt: Register, imm19: u19) Instruction {
- return Instruction{
- .load_literal = .{
- .rt = rt.enc(),
- .imm19 = imm19,
- .opc = switch (rt.size()) {
- 32 => 0b00,
- 64 => 0b01,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn exceptionGeneration(
- opc: u3,
- op2: u3,
- ll: u2,
- imm16: u16,
- ) Instruction {
- return Instruction{
- .exception_generation = .{
- .ll = ll,
- .op2 = op2,
- .imm16 = imm16,
- .opc = opc,
- },
- };
- }
-
- fn unconditionalBranchRegister(
- opc: u4,
- op2: u5,
- op3: u6,
- rn: Register,
- op4: u5,
- ) Instruction {
- assert(rn.size() == 64);
-
- return Instruction{
- .unconditional_branch_register = .{
- .op4 = op4,
- .rn = rn.enc(),
- .op3 = op3,
- .op2 = op2,
- .opc = opc,
- },
- };
- }
-
- fn unconditionalBranchImmediate(
- op: u1,
- offset: i28,
- ) Instruction {
- return Instruction{
- .unconditional_branch_immediate = .{
- .imm26 = @as(u26, @bitCast(@as(i26, @intCast(offset >> 2)))),
- .op = op,
- },
- };
- }
-
- pub const LogicalShiftedRegisterShift = enum(u2) { lsl, lsr, asr, ror };
-
- fn logicalShiftedRegister(
- opc: u2,
- n: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
- if (rd.size() == 32) assert(amount < 32);
-
- return Instruction{
- .logical_shifted_register = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm6 = amount,
- .rm = rm.enc(),
- .n = n,
- .shift = @intFromEnum(shift),
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable,
- },
- },
- };
- }
-
- fn addSubtractImmediate(
- op: u1,
- s: u1,
- rd: Register,
- rn: Register,
- imm12: u12,
- shift: bool,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rn.id() != Register.xzr.id());
-
- return Instruction{
- .add_subtract_immediate = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm12 = imm12,
- .sh = @intFromBool(shift),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn logicalImmediate(
- opc: u2,
- rd: Register,
- rn: Register,
- imms: u6,
- immr: u6,
- n: u1,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(!(rd.size() == 32 and n != 0));
-
- return Instruction{
- .logical_immediate = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imms = imms,
- .immr = immr,
- .n = n,
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn initBitfield(
- opc: u2,
- n: u1,
- rd: Register,
- rn: Register,
- immr: u6,
- imms: u6,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(!(rd.size() == 64 and n != 1));
- assert(!(rd.size() == 32 and (n != 0 or immr >> 5 != 0 or immr >> 5 != 0)));
-
- return Instruction{
- .bitfield = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imms = imms,
- .immr = immr,
- .n = n,
- .opc = opc,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- pub const AddSubtractShiftedRegisterShift = enum(u2) { lsl, lsr, asr, _ };
-
- fn addSubtractShiftedRegister(
- op: u1,
- s: u1,
- shift: AddSubtractShiftedRegisterShift,
- rd: Register,
- rn: Register,
- rm: Register,
- imm6: u6,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
-
- return Instruction{
- .add_subtract_shifted_register = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm6 = imm6,
- .rm = rm.enc(),
- .shift = @intFromEnum(shift),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- pub const AddSubtractExtendedRegisterOption = enum(u3) {
- uxtb,
- uxth,
- uxtw,
- uxtx, // serves also as lsl
- sxtb,
- sxth,
- sxtw,
- sxtx,
- };
-
- fn addSubtractExtendedRegister(
- op: u1,
- s: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return Instruction{
- .add_subtract_extended_register = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .imm3 = imm3,
- .option = @intFromEnum(extend),
- .rm = rm.enc(),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn conditionalBranch(
- o0: u1,
- o1: u1,
- cond: Condition,
- offset: i21,
- ) Instruction {
- assert(offset & 0b11 == 0b00);
-
- return Instruction{
- .conditional_branch = .{
- .cond = @intFromEnum(cond),
- .o0 = o0,
- .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
- .o1 = o1,
- },
- };
- }
-
- fn compareAndBranch(
- op: u1,
- rt: Register,
- offset: i21,
- ) Instruction {
- assert(offset & 0b11 == 0b00);
-
- return Instruction{
- .compare_and_branch = .{
- .rt = rt.enc(),
- .imm19 = @as(u19, @bitCast(@as(i19, @intCast(offset >> 2)))),
- .op = op,
- .sf = switch (rt.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn conditionalSelect(
- op2: u2,
- op: u1,
- s: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- cond: Condition,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
-
- return Instruction{
- .conditional_select = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .op2 = op2,
- .cond = @intFromEnum(cond),
- .rm = rm.enc(),
- .s = s,
- .op = op,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn dataProcessing3Source(
- op54: u2,
- op31: u3,
- o0: u1,
- rd: Register,
- rn: Register,
- rm: Register,
- ra: Register,
- ) Instruction {
- return Instruction{
- .data_processing_3_source = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .ra = ra.enc(),
- .o0 = o0,
- .rm = rm.enc(),
- .op31 = op31,
- .op54 = op54,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- fn dataProcessing2Source(
- s: u1,
- opcode: u6,
- rd: Register,
- rn: Register,
- rm: Register,
- ) Instruction {
- assert(rd.size() == rn.size());
- assert(rd.size() == rm.size());
-
- return Instruction{
- .data_processing_2_source = .{
- .rd = rd.enc(),
- .rn = rn.enc(),
- .opcode = opcode,
- .rm = rm.enc(),
- .s = s,
- .sf = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- },
- },
- };
- }
-
- // Helper functions for assembly syntax functions
-
- // Move wide (immediate)
-
- pub fn movn(rd: Register, imm16: u16, shift: u6) Instruction {
- return moveWideImmediate(0b00, rd, imm16, shift);
- }
-
- pub fn movz(rd: Register, imm16: u16, shift: u6) Instruction {
- return moveWideImmediate(0b10, rd, imm16, shift);
- }
-
- pub fn movk(rd: Register, imm16: u16, shift: u6) Instruction {
- return moveWideImmediate(0b11, rd, imm16, shift);
- }
-
- // PC relative address
-
- pub fn adr(rd: Register, imm21: i21) Instruction {
- return pcRelativeAddress(rd, imm21, 0b0);
- }
-
- pub fn adrp(rd: Register, imm21: i21) Instruction {
- return pcRelativeAddress(rd, imm21, 0b1);
- }
-
- // Load or store register
-
- pub fn ldrLiteral(rt: Register, literal: u19) Instruction {
- return loadLiteral(rt, literal);
- }
-
- pub fn ldr(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldr);
- }
-
- pub fn ldrh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrh);
- }
-
- pub fn ldrb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrb);
- }
-
- pub fn ldrsb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrsb);
- }
-
- pub fn ldrsh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrsh);
- }
-
- pub fn ldrsw(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .ldrsw);
- }
-
- pub fn str(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .str);
- }
-
- pub fn strh(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .strh);
- }
-
- pub fn strb(rt: Register, rn: Register, offset: LoadStoreOffset) Instruction {
- return loadStoreRegister(rt, rn, offset, .strb);
- }
-
- // Load or store pair of registers
-
- pub const LoadStorePairOffset = struct {
- encoding: enum(u2) {
- post_index = 0b01,
- signed = 0b10,
- pre_index = 0b11,
- },
- offset: i9,
-
- pub fn none() LoadStorePairOffset {
- return .{ .encoding = .signed, .offset = 0 };
- }
-
- pub fn post_index(imm: i9) LoadStorePairOffset {
- return .{ .encoding = .post_index, .offset = imm };
- }
-
- pub fn pre_index(imm: i9) LoadStorePairOffset {
- return .{ .encoding = .pre_index, .offset = imm };
- }
-
- pub fn signed(imm: i9) LoadStorePairOffset {
- return .{ .encoding = .signed, .offset = imm };
- }
- };
-
- pub fn ldp(rt1: Register, rt2: Register, rn: Register, offset: LoadStorePairOffset) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset.offset, @intFromEnum(offset.encoding), true);
- }
-
- pub fn ldnp(rt1: Register, rt2: Register, rn: Register, offset: i9) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset, 0, true);
- }
-
- pub fn stp(rt1: Register, rt2: Register, rn: Register, offset: LoadStorePairOffset) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset.offset, @intFromEnum(offset.encoding), false);
- }
-
- pub fn stnp(rt1: Register, rt2: Register, rn: Register, offset: i9) Instruction {
- return loadStoreRegisterPair(rt1, rt2, rn, offset, 0, false);
- }
-
- // Exception generation
-
- pub fn svc(imm16: u16) Instruction {
- return exceptionGeneration(0b000, 0b000, 0b01, imm16);
- }
-
- pub fn hvc(imm16: u16) Instruction {
- return exceptionGeneration(0b000, 0b000, 0b10, imm16);
- }
-
- pub fn smc(imm16: u16) Instruction {
- return exceptionGeneration(0b000, 0b000, 0b11, imm16);
- }
-
- pub fn brk(imm16: u16) Instruction {
- return exceptionGeneration(0b001, 0b000, 0b00, imm16);
- }
-
- pub fn hlt(imm16: u16) Instruction {
- return exceptionGeneration(0b010, 0b000, 0b00, imm16);
- }
-
- // Unconditional branch (register)
-
- pub fn br(rn: Register) Instruction {
- return unconditionalBranchRegister(0b0000, 0b11111, 0b000000, rn, 0b00000);
- }
-
- pub fn blr(rn: Register) Instruction {
- return unconditionalBranchRegister(0b0001, 0b11111, 0b000000, rn, 0b00000);
- }
-
- pub fn ret(rn: ?Register) Instruction {
- return unconditionalBranchRegister(0b0010, 0b11111, 0b000000, rn orelse .x30, 0b00000);
- }
-
- // Unconditional branch (immediate)
-
- pub fn b(offset: i28) Instruction {
- return unconditionalBranchImmediate(0, offset);
- }
-
- pub fn bl(offset: i28) Instruction {
- return unconditionalBranchImmediate(1, offset);
- }
-
- // Nop
-
- pub fn nop() Instruction {
- return Instruction{ .no_operation = .{} };
- }
-
- // Logical (shifted register)
-
- pub fn andShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b00, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn bicShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b00, 0b1, rd, rn, rm, shift, amount);
- }
-
- pub fn orrShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b01, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn ornShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b01, 0b1, rd, rn, rm, shift, amount);
- }
-
- pub fn eorShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b10, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn eonShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b10, 0b1, rd, rn, rm, shift, amount);
- }
-
- pub fn andsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b11, 0b0, rd, rn, rm, shift, amount);
- }
-
- pub fn bicsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: LogicalShiftedRegisterShift,
- amount: u6,
- ) Instruction {
- return logicalShiftedRegister(0b11, 0b1, rd, rn, rm, shift, amount);
- }
-
- // Add/subtract (immediate)
-
- pub fn add(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b0, 0b0, rd, rn, imm, shift);
- }
-
- pub fn adds(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b0, 0b1, rd, rn, imm, shift);
- }
-
- pub fn sub(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b1, 0b0, rd, rn, imm, shift);
- }
-
- pub fn subs(rd: Register, rn: Register, imm: u12, shift: bool) Instruction {
- return addSubtractImmediate(0b1, 0b1, rd, rn, imm, shift);
- }
-
- // Logical (immediate)
-
- pub fn andImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b00, rd, rn, imms, immr, n);
- }
-
- pub fn orrImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b01, rd, rn, imms, immr, n);
- }
-
- pub fn eorImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b10, rd, rn, imms, immr, n);
- }
-
- pub fn andsImmediate(rd: Register, rn: Register, imms: u6, immr: u6, n: u1) Instruction {
- return logicalImmediate(0b11, rd, rn, imms, immr, n);
- }
-
- // Bitfield
-
- pub fn sbfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
- const n: u1 = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- };
- return initBitfield(0b00, n, rd, rn, immr, imms);
- }
-
- pub fn bfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
- const n: u1 = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- };
- return initBitfield(0b01, n, rd, rn, immr, imms);
- }
-
- pub fn ubfm(rd: Register, rn: Register, immr: u6, imms: u6) Instruction {
- const n: u1 = switch (rd.size()) {
- 32 => 0b0,
- 64 => 0b1,
- else => unreachable, // unexpected register size
- };
- return initBitfield(0b10, n, rd, rn, immr, imms);
- }
-
- pub fn asrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const imms = @as(u6, @intCast(rd.size() - 1));
- return sbfm(rd, rn, shift, imms);
- }
-
- pub fn sbfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
- return sbfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
- }
-
- pub fn sxtb(rd: Register, rn: Register) Instruction {
- return sbfm(rd, rn, 0, 7);
- }
-
- pub fn sxth(rd: Register, rn: Register) Instruction {
- return sbfm(rd, rn, 0, 15);
- }
-
- pub fn sxtw(rd: Register, rn: Register) Instruction {
- assert(rd.size() == 64);
- return sbfm(rd, rn, 0, 31);
- }
-
- pub fn lslImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const size = @as(u6, @intCast(rd.size() - 1));
- return ubfm(rd, rn, size - shift + 1, size - shift);
- }
-
- pub fn lsrImmediate(rd: Register, rn: Register, shift: u6) Instruction {
- const imms = @as(u6, @intCast(rd.size() - 1));
- return ubfm(rd, rn, shift, imms);
- }
-
- pub fn ubfx(rd: Register, rn: Register, lsb: u6, width: u7) Instruction {
- return ubfm(rd, rn, lsb, @as(u6, @intCast(lsb + width - 1)));
- }
-
- pub fn uxtb(rd: Register, rn: Register) Instruction {
- return ubfm(rd, rn, 0, 7);
- }
-
- pub fn uxth(rd: Register, rn: Register) Instruction {
- return ubfm(rd, rn, 0, 15);
- }
-
- // Add/subtract (shifted register)
-
- pub fn addShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b0, 0b0, shift, rd, rn, rm, imm6);
- }
-
- pub fn addsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b0, 0b1, shift, rd, rn, rm, imm6);
- }
-
- pub fn subShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b1, 0b0, shift, rd, rn, rm, imm6);
- }
-
- pub fn subsShiftedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- shift: AddSubtractShiftedRegisterShift,
- imm6: u6,
- ) Instruction {
- return addSubtractShiftedRegister(0b1, 0b1, shift, rd, rn, rm, imm6);
- }
-
- // Add/subtract (extended register)
-
- pub fn addExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b0, 0b0, rd, rn, rm, extend, imm3);
- }
-
- pub fn addsExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b0, 0b1, rd, rn, rm, extend, imm3);
- }
-
- pub fn subExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b1, 0b0, rd, rn, rm, extend, imm3);
- }
-
- pub fn subsExtendedRegister(
- rd: Register,
- rn: Register,
- rm: Register,
- extend: AddSubtractExtendedRegisterOption,
- imm3: u3,
- ) Instruction {
- return addSubtractExtendedRegister(0b1, 0b1, rd, rn, rm, extend, imm3);
- }
-
- // Conditional branch
-
- pub fn bCond(cond: Condition, offset: i21) Instruction {
- return conditionalBranch(0b0, 0b0, cond, offset);
- }
-
- // Compare and branch
-
- pub fn cbz(rt: Register, offset: i21) Instruction {
- return compareAndBranch(0b0, rt, offset);
- }
-
- pub fn cbnz(rt: Register, offset: i21) Instruction {
- return compareAndBranch(0b1, rt, offset);
- }
-
- // Conditional select
-
- pub fn csel(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b00, 0b0, 0b0, rd, rn, rm, cond);
- }
-
- pub fn csinc(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b01, 0b0, 0b0, rd, rn, rm, cond);
- }
-
- pub fn csinv(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b00, 0b1, 0b0, rd, rn, rm, cond);
- }
-
- pub fn csneg(rd: Register, rn: Register, rm: Register, cond: Condition) Instruction {
- return conditionalSelect(0b01, 0b1, 0b0, rd, rn, rm, cond);
- }
-
- // Data processing (3 source)
-
- pub fn madd(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- return dataProcessing3Source(0b00, 0b000, 0b0, rd, rn, rm, ra);
- }
-
- pub fn smaddl(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- assert(rd.size() == 64 and rn.size() == 32 and rm.size() == 32 and ra.size() == 64);
- return dataProcessing3Source(0b00, 0b001, 0b0, rd, rn, rm, ra);
- }
-
- pub fn umaddl(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- assert(rd.size() == 64 and rn.size() == 32 and rm.size() == 32 and ra.size() == 64);
- return dataProcessing3Source(0b00, 0b101, 0b0, rd, rn, rm, ra);
- }
-
- pub fn msub(rd: Register, rn: Register, rm: Register, ra: Register) Instruction {
- return dataProcessing3Source(0b00, 0b000, 0b1, rd, rn, rm, ra);
- }
-
- pub fn mul(rd: Register, rn: Register, rm: Register) Instruction {
- return madd(rd, rn, rm, .xzr);
- }
-
- pub fn smull(rd: Register, rn: Register, rm: Register) Instruction {
- return smaddl(rd, rn, rm, .xzr);
- }
-
- pub fn smulh(rd: Register, rn: Register, rm: Register) Instruction {
- assert(rd.size() == 64);
- return dataProcessing3Source(0b00, 0b010, 0b0, rd, rn, rm, .xzr);
- }
-
- pub fn umull(rd: Register, rn: Register, rm: Register) Instruction {
- return umaddl(rd, rn, rm, .xzr);
- }
-
- pub fn umulh(rd: Register, rn: Register, rm: Register) Instruction {
- assert(rd.size() == 64);
- return dataProcessing3Source(0b00, 0b110, 0b0, rd, rn, rm, .xzr);
- }
-
- pub fn mneg(rd: Register, rn: Register, rm: Register) Instruction {
- return msub(rd, rn, rm, .xzr);
- }
-
- // Data processing (2 source)
-
- pub fn udiv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b000010, rd, rn, rm);
- }
-
- pub fn sdiv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b000011, rd, rn, rm);
- }
-
- pub fn lslv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b001000, rd, rn, rm);
- }
-
- pub fn lsrv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b001001, rd, rn, rm);
- }
-
- pub fn asrv(rd: Register, rn: Register, rm: Register) Instruction {
- return dataProcessing2Source(0b0, 0b001010, rd, rn, rm);
- }
-
- pub const asrRegister = asrv;
- pub const lslRegister = lslv;
- pub const lsrRegister = lsrv;
-};
-
-test {
- testing.refAllDecls(@This());
-}
-
-test "serialize instructions" {
- const Testcase = struct {
- inst: Instruction,
- expected: u32,
- };
-
- const testcases = [_]Testcase{
- .{ // orr x0, xzr, x1
- .inst = Instruction.orrShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
- .expected = 0b1_01_01010_00_0_00001_000000_11111_00000,
- },
- .{ // orn x0, xzr, x1
- .inst = Instruction.ornShiftedRegister(.x0, .xzr, .x1, .lsl, 0),
- .expected = 0b1_01_01010_00_1_00001_000000_11111_00000,
- },
- .{ // movz x1, #4
- .inst = Instruction.movz(.x1, 4, 0),
- .expected = 0b1_10_100101_00_0000000000000100_00001,
- },
- .{ // movz x1, #4, lsl 16
- .inst = Instruction.movz(.x1, 4, 16),
- .expected = 0b1_10_100101_01_0000000000000100_00001,
- },
- .{ // movz x1, #4, lsl 32
- .inst = Instruction.movz(.x1, 4, 32),
- .expected = 0b1_10_100101_10_0000000000000100_00001,
- },
- .{ // movz x1, #4, lsl 48
- .inst = Instruction.movz(.x1, 4, 48),
- .expected = 0b1_10_100101_11_0000000000000100_00001,
- },
- .{ // movz w1, #4
- .inst = Instruction.movz(.w1, 4, 0),
- .expected = 0b0_10_100101_00_0000000000000100_00001,
- },
- .{ // movz w1, #4, lsl 16
- .inst = Instruction.movz(.w1, 4, 16),
- .expected = 0b0_10_100101_01_0000000000000100_00001,
- },
- .{ // svc #0
- .inst = Instruction.svc(0),
- .expected = 0b1101_0100_000_0000000000000000_00001,
- },
- .{ // svc #0x80 ; typical on Darwin
- .inst = Instruction.svc(0x80),
- .expected = 0b1101_0100_000_0000000010000000_00001,
- },
- .{ // ret
- .inst = Instruction.ret(null),
- .expected = 0b1101_011_00_10_11111_0000_00_11110_00000,
- },
- .{ // bl #0x10
- .inst = Instruction.bl(0x10),
- .expected = 0b1_00101_00_0000_0000_0000_0000_0000_0100,
- },
- .{ // ldr x2, [x1]
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.none),
- .expected = 0b11_111_0_01_01_000000000000_00001_00010,
- },
- .{ // ldr x2, [x1, #1]!
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.imm_pre_index(1)),
- .expected = 0b11_111_0_00_01_0_000000001_11_00001_00010,
- },
- .{ // ldr x2, [x1], #-1
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.imm_post_index(-1)),
- .expected = 0b11_111_0_00_01_0_111111111_01_00001_00010,
- },
- .{ // ldr x2, [x1], (x3)
- .inst = Instruction.ldr(.x2, .x1, Instruction.LoadStoreOffset.reg(.x3)),
- .expected = 0b11_111_0_00_01_1_00011_011_0_10_00001_00010,
- },
- .{ // ldr x2, label
- .inst = Instruction.ldrLiteral(.x2, 0x1),
- .expected = 0b01_011_0_00_0000000000000000001_00010,
- },
- .{ // ldrh x7, [x4], #0xaa
- .inst = Instruction.ldrh(.x7, .x4, Instruction.LoadStoreOffset.imm_post_index(0xaa)),
- .expected = 0b01_111_0_00_01_0_010101010_01_00100_00111,
- },
- .{ // ldrb x9, [x15, #0xff]!
- .inst = Instruction.ldrb(.x9, .x15, Instruction.LoadStoreOffset.imm_pre_index(0xff)),
- .expected = 0b00_111_0_00_01_0_011111111_11_01111_01001,
- },
- .{ // str x2, [x1]
- .inst = Instruction.str(.x2, .x1, Instruction.LoadStoreOffset.none),
- .expected = 0b11_111_0_01_00_000000000000_00001_00010,
- },
- .{ // str x2, [x1], (x3)
- .inst = Instruction.str(.x2, .x1, Instruction.LoadStoreOffset.reg(.x3)),
- .expected = 0b11_111_0_00_00_1_00011_011_0_10_00001_00010,
- },
- .{ // strh w0, [x1]
- .inst = Instruction.strh(.w0, .x1, Instruction.LoadStoreOffset.none),
- .expected = 0b01_111_0_01_00_000000000000_00001_00000,
- },
- .{ // strb w8, [x9]
- .inst = Instruction.strb(.w8, .x9, Instruction.LoadStoreOffset.none),
- .expected = 0b00_111_0_01_00_000000000000_01001_01000,
- },
- .{ // adr x2, #0x8
- .inst = Instruction.adr(.x2, 0x8),
- .expected = 0b0_00_10000_0000000000000000010_00010,
- },
- .{ // adr x2, -#0x8
- .inst = Instruction.adr(.x2, -0x8),
- .expected = 0b0_00_10000_1111111111111111110_00010,
- },
- .{ // adrp x2, #0x8
- .inst = Instruction.adrp(.x2, 0x8),
- .expected = 0b1_00_10000_0000000000000000010_00010,
- },
- .{ // adrp x2, -#0x8
- .inst = Instruction.adrp(.x2, -0x8),
- .expected = 0b1_00_10000_1111111111111111110_00010,
- },
- .{ // stp x1, x2, [sp, #8]
- .inst = Instruction.stp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.signed(8)),
- .expected = 0b10_101_0_010_0_0000001_00010_11111_00001,
- },
- .{ // ldp x1, x2, [sp, #8]
- .inst = Instruction.ldp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.signed(8)),
- .expected = 0b10_101_0_010_1_0000001_00010_11111_00001,
- },
- .{ // stp x1, x2, [sp, #-16]!
- .inst = Instruction.stp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.pre_index(-16)),
- .expected = 0b10_101_0_011_0_1111110_00010_11111_00001,
- },
- .{ // ldp x1, x2, [sp], #16
- .inst = Instruction.ldp(.x1, .x2, .sp, Instruction.LoadStorePairOffset.post_index(16)),
- .expected = 0b10_101_0_001_1_0000010_00010_11111_00001,
- },
- .{ // and x0, x4, x2
- .inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0),
- .expected = 0b1_00_01010_00_0_00010_000000_00100_00000,
- },
- .{ // and x0, x4, x2, lsl #0x8
- .inst = Instruction.andShiftedRegister(.x0, .x4, .x2, .lsl, 0x8),
- .expected = 0b1_00_01010_00_0_00010_001000_00100_00000,
- },
- .{ // add x0, x10, #10
- .inst = Instruction.add(.x0, .x10, 10, false),
- .expected = 0b1_0_0_100010_0_0000_0000_1010_01010_00000,
- },
- .{ // subs x0, x5, #11, lsl #12
- .inst = Instruction.subs(.x0, .x5, 11, true),
- .expected = 0b1_1_1_100010_1_0000_0000_1011_00101_00000,
- },
- .{ // b.hi #-4
- .inst = Instruction.bCond(.hi, -4),
- .expected = 0b0101010_0_1111111111111111111_0_1000,
- },
- .{ // cbz x10, #40
- .inst = Instruction.cbz(.x10, 40),
- .expected = 0b1_011010_0_0000000000000001010_01010,
- },
- .{ // add x0, x1, x2, lsl #5
- .inst = Instruction.addShiftedRegister(.x0, .x1, .x2, .lsl, 5),
- .expected = 0b1_0_0_01011_00_0_00010_000101_00001_00000,
- },
- .{ // csinc x1, x2, x4, eq
- .inst = Instruction.csinc(.x1, .x2, .x4, .eq),
- .expected = 0b1_0_0_11010100_00100_0000_0_1_00010_00001,
- },
- .{ // mul x1, x4, x9
- .inst = Instruction.mul(.x1, .x4, .x9),
- .expected = 0b1_00_11011_000_01001_0_11111_00100_00001,
- },
- .{ // eor x3, x5, #1
- .inst = Instruction.eorImmediate(.x3, .x5, 0b000000, 0b000000, 0b1),
- .expected = 0b1_10_100100_1_000000_000000_00101_00011,
- },
- .{ // lslv x6, x9, x10
- .inst = Instruction.lslv(.x6, .x9, .x10),
- .expected = 0b1_0_0_11010110_01010_0010_00_01001_00110,
- },
- .{ // lsl x4, x2, #42
- .inst = Instruction.lslImmediate(.x4, .x2, 42),
- .expected = 0b1_10_100110_1_010110_010101_00010_00100,
- },
- .{ // lsl x4, x2, #63
- .inst = Instruction.lslImmediate(.x4, .x2, 63),
- .expected = 0b1_10_100110_1_000001_000000_00010_00100,
- },
- .{ // lsr x4, x2, #42
- .inst = Instruction.lsrImmediate(.x4, .x2, 42),
- .expected = 0b1_10_100110_1_101010_111111_00010_00100,
- },
- .{ // lsr x4, x2, #63
- .inst = Instruction.lsrImmediate(.x4, .x2, 63),
- .expected = 0b1_10_100110_1_111111_111111_00010_00100,
- },
- .{ // umull x0, w0, w1
- .inst = Instruction.umull(.x0, .w0, .w1),
- .expected = 0b1_00_11011_1_01_00001_0_11111_00000_00000,
- },
- .{ // smull x0, w0, w1
- .inst = Instruction.smull(.x0, .w0, .w1),
- .expected = 0b1_00_11011_0_01_00001_0_11111_00000_00000,
- },
- .{ // tst x0, #0xffffffff00000000
- .inst = Instruction.andsImmediate(.xzr, .x0, 0b011111, 0b100000, 0b1),
- .expected = 0b1_11_100100_1_100000_011111_00000_11111,
- },
- .{ // umulh x0, x1, x2
- .inst = Instruction.umulh(.x0, .x1, .x2),
- .expected = 0b1_00_11011_1_10_00010_0_11111_00001_00000,
- },
- .{ // smulh x0, x1, x2
- .inst = Instruction.smulh(.x0, .x1, .x2),
- .expected = 0b1_00_11011_0_10_00010_0_11111_00001_00000,
- },
- .{ // adds x0, x1, x2, sxtx
- .inst = Instruction.addsExtendedRegister(.x0, .x1, .x2, .sxtx, 0),
- .expected = 0b1_0_1_01011_00_1_00010_111_000_00001_00000,
- },
- };
-
- for (testcases) |case| {
- const actual = case.inst.toU32();
- try testing.expectEqual(case.expected, actual);
- }
-}
src/arch/riscv64/CodeGen.zig
@@ -744,7 +744,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -767,7 +767,7 @@ pub fn generate(
.pt = pt,
.mod = mod,
.bin_file = bin_file,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.target = &mod.resolved_target.result,
.owner = .{ .nav_index = func.owner_nav },
.args = undefined, // populated after `resolveCallingConventionValues`
@@ -4584,7 +4584,7 @@ fn structFieldPtr(func: *Func, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde
const field_offset: i32 = switch (container_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, zcu)),
.@"packed" => @divExact(@as(i32, ptr_container_ty.ptrInfo(zcu).packed_offset.bit_offset) +
- (if (zcu.typeToStruct(container_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, index) else 0) -
+ (if (zcu.typeToStruct(container_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, index) else 0) -
ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
};
@@ -4615,7 +4615,7 @@ fn airStructFieldVal(func: *Func, inst: Air.Inst.Index) !void {
const field_off: u32 = switch (struct_ty.containerLayout(zcu)) {
.auto, .@"extern" => @intCast(struct_ty.structFieldOffset(index, zcu) * 8),
.@"packed" => if (zcu.typeToStruct(struct_ty)) |struct_type|
- pt.structPackedFieldBitOffset(struct_type, index)
+ zcu.structPackedFieldBitOffset(struct_type, index)
else
0,
};
@@ -8059,7 +8059,7 @@ fn airAggregateInit(func: *Func, inst: Air.Inst.Index) !void {
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = pt.structPackedFieldBitOffset(struct_obj, elem_i);
+ const elem_off = zcu.structPackedFieldBitOffset(struct_obj, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try func.resolveInst(elem);
src/arch/sparc64/CodeGen.zig
@@ -267,7 +267,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -288,7 +288,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.target = target,
.bin_file = lf,
.func_index = func_index,
src/arch/wasm/CodeGen.zig
@@ -1173,7 +1173,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) Error!Mir {
_ = src_loc;
_ = bin_file;
@@ -1194,7 +1194,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.owner_nav = cg.owner_nav,
.target = target,
.ptr_size = switch (target.cpu.arch) {
@@ -3776,7 +3776,7 @@ fn structFieldPtr(
break :offset @as(u32, 0);
}
const struct_type = zcu.typeToStruct(struct_ty).?;
- break :offset @divExact(pt.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
+ break :offset @divExact(zcu.structPackedFieldBitOffset(struct_type, index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
},
.@"union" => 0,
else => unreachable,
@@ -3812,7 +3812,7 @@ fn airStructFieldVal(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.@"packed" => switch (struct_ty.zigTypeTag(zcu)) {
.@"struct" => result: {
const packed_struct = zcu.typeToPackedStruct(struct_ty).?;
- const offset = pt.structPackedFieldBitOffset(packed_struct, field_index);
+ const offset = zcu.structPackedFieldBitOffset(packed_struct, field_index);
const backing_ty = Type.fromInterned(packed_struct.backingIntTypeUnordered(ip));
const host_bits = backing_ty.intInfo(zcu).bits;
@@ -5696,7 +5696,7 @@ fn airFieldParentPtr(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.auto, .@"extern" => parent_ty.structFieldOffset(field_index, zcu),
.@"packed" => offset: {
const parent_ptr_offset = parent_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset;
- const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0;
+ const field_offset = if (zcu.typeToStruct(parent_ty)) |loaded_struct| zcu.structPackedFieldBitOffset(loaded_struct, field_index) else 0;
const field_ptr_offset = field_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset;
break :offset @divExact(parent_ptr_offset + field_offset - field_ptr_offset, 8);
},
src/arch/x86_64/CodeGen.zig
@@ -878,7 +878,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) codegen.CodeGenError!Mir {
_ = bin_file;
const zcu = pt.zcu;
@@ -894,7 +894,7 @@ pub fn generate(
.gpa = gpa,
.pt = pt,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.target = &mod.resolved_target.result,
.mod = mod,
.owner = .{ .nav_index = func.owner_nav },
@@ -100674,11 +100674,12 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const struct_field = cg.air.extraData(Air.StructField, ty_pl.payload).data;
var ops = try cg.tempsFromOperands(inst, .{struct_field.struct_operand});
- try ops[0].toOffset(cg.fieldOffset(
+ try ops[0].toOffset(@intCast(codegen.fieldOffset(
cg.typeOf(struct_field.struct_operand),
ty_pl.ty.toType(),
struct_field.field_index,
- ), cg);
+ zcu,
+ )), cg);
try ops[0].finish(inst, &.{struct_field.struct_operand}, &ops, cg);
},
.struct_field_ptr_index_0,
@@ -100688,7 +100689,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
=> |air_tag| {
const ty_op = air_datas[@intFromEnum(inst)].ty_op;
var ops = try cg.tempsFromOperands(inst, .{ty_op.operand});
- try ops[0].toOffset(cg.fieldOffset(
+ try ops[0].toOffset(@intCast(codegen.fieldOffset(
cg.typeOf(ty_op.operand),
ty_op.ty.toType(),
switch (air_tag) {
@@ -100698,7 +100699,8 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
.struct_field_ptr_index_2 => 2,
.struct_field_ptr_index_3 => 3,
},
- ), cg);
+ zcu,
+ )), cg);
try ops[0].finish(inst, &.{ty_op.operand}, &ops, cg);
},
.struct_field_val => {
@@ -168108,11 +168110,12 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_pl = air_datas[@intFromEnum(inst)].ty_pl;
const field_parent_ptr = cg.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
var ops = try cg.tempsFromOperands(inst, .{field_parent_ptr.field_ptr});
- try ops[0].toOffset(-cg.fieldOffset(
+ try ops[0].toOffset(-@as(i32, @intCast(codegen.fieldOffset(
ty_pl.ty.toType(),
cg.typeOf(field_parent_ptr.field_ptr),
field_parent_ptr.field_index,
- ), cg);
+ zcu,
+ ))), cg);
try ops[0].finish(inst, &.{field_parent_ptr.field_ptr}, &ops, cg);
},
.wasm_memory_size, .wasm_memory_grow => unreachable,
@@ -174809,18 +174812,6 @@ fn airStore(self: *CodeGen, inst: Air.Inst.Index, safety: bool) !void {
return self.finishAir(inst, .none, .{ bin_op.lhs, bin_op.rhs, .none });
}
-fn fieldOffset(self: *CodeGen, ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32) i32 {
- const pt = self.pt;
- const zcu = pt.zcu;
- const agg_ty = ptr_agg_ty.childType(zcu);
- return switch (agg_ty.containerLayout(zcu)) {
- .auto, .@"extern" => @intCast(agg_ty.structFieldOffset(field_index, zcu)),
- .@"packed" => @divExact(@as(i32, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
- (if (zcu.typeToStruct(agg_ty)) |loaded_struct| pt.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
- ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
- };
-}
-
fn genUnOp(self: *CodeGen, maybe_inst: ?Air.Inst.Index, tag: Air.Inst.Tag, src_air: Air.Inst.Ref) !MCValue {
const pt = self.pt;
const zcu = pt.zcu;
@@ -184575,7 +184566,7 @@ fn airAggregateInit(self: *CodeGen, inst: Air.Inst.Index) !void {
}
const elem_abi_size: u32 = @intCast(elem_ty.abiSize(zcu));
const elem_abi_bits = elem_abi_size * 8;
- const elem_off = pt.structPackedFieldBitOffset(loaded_struct, elem_i);
+ const elem_off = zcu.structPackedFieldBitOffset(loaded_struct, elem_i);
const elem_byte_off: i32 = @intCast(elem_off / elem_abi_bits * elem_abi_size);
const elem_bit_off = elem_off % elem_abi_bits;
const elem_mcv = try self.resolveInst(elem);
@@ -185625,21 +185616,19 @@ fn resolveCallingConventionValues(
fn fail(cg: *CodeGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = cg.pt.zcu;
- switch (cg.owner) {
- .nav_index => |i| return zcu.codegenFail(i, format, args),
- .lazy_sym => |s| return zcu.codegenFailType(s.ty, format, args),
- }
- return error.CodegenFail;
+ return switch (cg.owner) {
+ .nav_index => |i| zcu.codegenFail(i, format, args),
+ .lazy_sym => |s| zcu.codegenFailType(s.ty, format, args),
+ };
}
fn failMsg(cg: *CodeGen, msg: *Zcu.ErrorMsg) error{ OutOfMemory, CodegenFail } {
@branchHint(.cold);
const zcu = cg.pt.zcu;
- switch (cg.owner) {
- .nav_index => |i| return zcu.codegenFailMsg(i, msg),
- .lazy_sym => |s| return zcu.codegenFailTypeMsg(s.ty, msg),
- }
- return error.CodegenFail;
+ return switch (cg.owner) {
+ .nav_index => |i| zcu.codegenFailMsg(i, msg),
+ .lazy_sym => |s| zcu.codegenFailTypeMsg(s.ty, msg),
+ };
}
fn parseRegName(name: []const u8) ?Register {
src/codegen/aarch64/abi.zig
@@ -1,7 +1,5 @@
+const assert = @import("std").debug.assert;
const std = @import("std");
-const builtin = @import("builtin");
-const bits = @import("../../arch/aarch64/bits.zig");
-const Register = bits.Register;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
@@ -15,7 +13,7 @@ pub const Class = union(enum) {
/// For `float_array` the second element will be the amount of floats.
pub fn classifyType(ty: Type, zcu: *Zcu) Class {
- std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
+ assert(ty.hasRuntimeBitsIgnoreComptime(zcu));
var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag(zcu)) {
@@ -47,11 +45,11 @@ pub fn classifyType(ty: Type, zcu: *Zcu) Class {
return .byval;
},
.optional => {
- std.debug.assert(ty.isPtrLikeOptional(zcu));
+ assert(ty.isPtrLikeOptional(zcu));
return .byval;
},
.pointer => {
- std.debug.assert(!ty.isSlice(zcu));
+ assert(!ty.isSlice(zcu));
return .byval;
},
.error_union,
@@ -138,13 +136,3 @@ pub fn getFloatArrayType(ty: Type, zcu: *Zcu) ?Type {
else => return null,
}
}
-
-pub const callee_preserved_regs = [_]Register{
- .x19, .x20, .x21, .x22, .x23,
- .x24, .x25, .x26, .x27, .x28,
-};
-
-pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
-pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
-
-const allocatable_registers = callee_preserved_regs;
src/codegen/aarch64/Assemble.zig
@@ -0,0 +1,1653 @@
+source: [*:0]const u8,
+operands: std.StringHashMapUnmanaged(Operand),
+
+pub const Operand = union(enum) {
+ register: aarch64.encoding.Register,
+};
+
+pub fn nextInstruction(as: *Assemble) !?Instruction {
+ @setEvalBranchQuota(37_000);
+ comptime var ct_token_buf: [token_buf_len]u8 = undefined;
+ var token_buf: [token_buf_len]u8 = undefined;
+ const original_source = while (true) {
+ const original_source = as.source;
+ const source_token = try as.nextToken(&token_buf, .{});
+ if (source_token.len == 0) return null;
+ if (source_token[0] != '\n') break original_source;
+ };
+ log.debug(
+ \\.
+ \\=========================
+ \\= Assembling "{f}"
+ \\=========================
+ \\
+ , .{std.zig.fmtString(std.mem.span(original_source))});
+ inline for (instructions) |instruction| {
+ next_pattern: {
+ as.source = original_source;
+ var symbols: Symbols: {
+ const symbols = @typeInfo(@TypeOf(instruction.symbols)).@"struct".fields;
+ var symbol_fields: [symbols.len]std.builtin.Type.StructField = undefined;
+ for (&symbol_fields, symbols) |*symbol_field, symbol| symbol_field.* = .{
+ .name = symbol.name,
+ .type = zonCast(SymbolSpec, @field(instruction.symbols, symbol.name), .{}).Storage(),
+ .default_value_ptr = null,
+ .is_comptime = false,
+ .alignment = 0,
+ };
+ break :Symbols @Type(.{ .@"struct" = .{
+ .layout = .auto,
+ .fields = &symbol_fields,
+ .decls = &.{},
+ .is_tuple = false,
+ } });
+ } = undefined;
+ comptime var pattern_as: Assemble = .{ .source = instruction.pattern, .operands = undefined };
+ inline while (true) {
+ const pattern_token = comptime pattern_as.nextToken(&ct_token_buf, .{ .placeholders = true }) catch |err|
+ @compileError(@errorName(err) ++ " while parsing '" ++ instruction.pattern ++ "'");
+ const source_token = try as.nextToken(&token_buf, .{ .operands = true });
+ log.debug("\"{f}\" -> \"{f}\"", .{
+ std.zig.fmtString(pattern_token),
+ std.zig.fmtString(source_token),
+ });
+ if (pattern_token.len == 0) {
+ if (source_token.len > 0 and source_token[0] != '\n') break :next_pattern;
+ const encode = @field(Instruction, @tagName(instruction.encode[0]));
+ const Encode = @TypeOf(encode);
+ var args: std.meta.ArgsTuple(Encode) = undefined;
+ inline for (&args, @typeInfo(Encode).@"fn".params, 1..instruction.encode.len) |*arg, param, encode_index|
+ arg.* = zonCast(param.type.?, instruction.encode[encode_index], symbols);
+ return @call(.auto, encode, args);
+ } else if (pattern_token[0] == '<') {
+ const symbol_name = comptime pattern_token[1 .. std.mem.indexOfScalarPos(u8, pattern_token, 1, '|') orelse
+ pattern_token.len - 1];
+ const symbol = &@field(symbols, symbol_name);
+ symbol.* = zonCast(SymbolSpec, @field(instruction.symbols, symbol_name), .{}).parse(source_token) orelse break :next_pattern;
+ log.debug("{s} = {any}", .{ symbol_name, symbol.* });
+ } else if (!std.ascii.eqlIgnoreCase(pattern_token, source_token)) break :next_pattern;
+ }
+ }
+ log.debug("'{s}' not matched...", .{instruction.pattern});
+ }
+ as.source = original_source;
+ log.debug("Nothing matched!\n", .{});
+ return error.InvalidSyntax;
+}
+
+fn zonCast(comptime Result: type, zon_value: anytype, symbols: anytype) Result {
+ const ZonValue = @TypeOf(zon_value);
+ const Symbols = @TypeOf(symbols);
+ switch (@typeInfo(ZonValue)) {
+ .void, .bool, .int, .float, .pointer, .comptime_float, .comptime_int, .@"enum" => return zon_value,
+ .@"struct" => |zon_struct| switch (@typeInfo(Result)) {
+ .@"struct" => |result_struct| {
+ comptime var used_zon_fields = 0;
+ var result: Result = undefined;
+ inline for (result_struct.fields) |result_field| @field(result, result_field.name) = if (@hasField(ZonValue, result_field.name)) result: {
+ used_zon_fields += 1;
+ break :result zonCast(@FieldType(Result, result_field.name), @field(zon_value, result_field.name), symbols);
+ } else result_field.defaultValue() orelse @compileError(std.fmt.comptimePrint("missing zon field '{s}': {} <- {any}", .{ result_field.name, Result, zon_value }));
+ if (used_zon_fields != zon_struct.fields.len) @compileError(std.fmt.comptimePrint("unused zon field: {} <- {any}", .{ Result, zon_value }));
+ return result;
+ },
+ .@"union" => {
+ if (zon_struct.fields.len != 1) @compileError(std.fmt.comptimePrint("{} <- {any}", .{ Result, zon_value }));
+ const field_name = zon_struct.fields[0].name;
+ return @unionInit(
+ Result,
+ field_name,
+ zonCast(@FieldType(Result, field_name), @field(zon_value, field_name), symbols),
+ );
+ },
+ else => @compileError(std.fmt.comptimePrint("unsupported zon type: {} <- {any}", .{ Result, zon_value })),
+ },
+ .enum_literal => if (@hasField(Symbols, @tagName(zon_value))) {
+ const symbol = @field(symbols, @tagName(zon_value));
+ const Symbol = @TypeOf(symbol);
+ switch (@typeInfo(Result)) {
+ .@"enum" => switch (@typeInfo(Symbol)) {
+ .int => |symbol_int| {
+ var buf: [
+ std.fmt.count("{d}", .{switch (symbol_int.signedness) {
+ .signed => std.math.minInt(Symbol),
+ .unsigned => std.math.maxInt(Symbol),
+ }})
+ ]u8 = undefined;
+ return std.meta.stringToEnum(Result, std.fmt.bufPrint(&buf, "{d}", .{symbol}) catch unreachable).?;
+ },
+ else => return symbol,
+ },
+ else => return symbol,
+ }
+ } else return if (@hasDecl(Result, @tagName(zon_value))) @field(Result, @tagName(zon_value)) else zon_value,
+ else => @compileError(std.fmt.comptimePrint("unsupported zon type: {} <- {any}", .{ Result, zon_value })),
+ }
+}
+
+const token_buf_len = "v31.b[15]".len;
+fn nextToken(as: *Assemble, buf: *[token_buf_len]u8, comptime opts: struct {
+ operands: bool = false,
+ placeholders: bool = false,
+}) ![]const u8 {
+ const invalid_syntax: u8 = 1;
+ while (true) c: switch (as.source[0]) {
+ 0 => return as.source[0..0],
+ '\t', '\n' + 1...'\r', ' ' => as.source = as.source[1..],
+ '\n', '!', '#', ',', '[', ']' => {
+ defer as.source = as.source[1..];
+ return as.source[0..1];
+ },
+ '%' => if (opts.operands) {
+ if (as.source[1] != '[') continue :c invalid_syntax;
+ const name_start: usize = 2;
+ var index = name_start;
+ while (switch (as.source[index]) {
+ else => true,
+ ':', ']' => false,
+ }) index += 1;
+ const operand = as.operands.get(as.source[name_start..index]) orelse continue :c invalid_syntax;
+ const modifier = modifier: switch (as.source[index]) {
+ else => unreachable,
+ ':' => {
+ index += 1;
+ const modifier_start = index;
+ while (switch (as.source[index]) {
+ else => true,
+ ']' => false,
+ }) index += 1;
+ break :modifier as.source[modifier_start..index];
+ },
+ ']' => "",
+ };
+ assert(as.source[index] == ']');
+ const modified_operand: Operand = if (std.mem.eql(u8, modifier, ""))
+ operand
+ else if (std.mem.eql(u8, modifier, "w")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.w() },
+ } else if (std.mem.eql(u8, modifier, "x")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.x() },
+ } else if (std.mem.eql(u8, modifier, "b")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.b() },
+ } else if (std.mem.eql(u8, modifier, "h")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.h() },
+ } else if (std.mem.eql(u8, modifier, "s")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.s() },
+ } else if (std.mem.eql(u8, modifier, "d")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.d() },
+ } else if (std.mem.eql(u8, modifier, "q")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.q() },
+ } else if (std.mem.eql(u8, modifier, "Z")) switch (operand) {
+ .register => |reg| .{ .register = reg.alias.z() },
+ } else continue :c invalid_syntax;
+ switch (modified_operand) {
+ .register => |reg| {
+ as.source = as.source[index + 1 ..];
+ return std.fmt.bufPrint(buf, "{f}", .{reg.fmt()}) catch unreachable;
+ },
+ }
+ } else continue :c invalid_syntax,
+ '-', '0'...'9', 'A'...'Z', '_', 'a'...'z' => {
+ var index: usize = 1;
+ while (switch (as.source[index]) {
+ '0'...'9', 'A'...'Z', '_', 'a'...'z' => true,
+ else => false,
+ }) index += 1;
+ defer as.source = as.source[index..];
+ return as.source[0..index];
+ },
+ '<' => if (opts.placeholders) {
+ var index: usize = 1;
+ while (switch (as.source[index]) {
+ 0 => return error.UnterminatedPlaceholder,
+ '>' => false,
+ else => true,
+ }) index += 1;
+ defer as.source = as.source[index + 1 ..];
+ return as.source[0 .. index + 1];
+ } else continue :c invalid_syntax,
+ else => {
+ if (!@inComptime()) log.debug("invalid token \"{f}\"", .{std.zig.fmtString(std.mem.span(as.source))});
+ return error.InvalidSyntax;
+ },
+ };
+}
+
+const SymbolSpec = union(enum) {
+ reg: struct { format: aarch64.encoding.Register.Format, allow_sp: bool = false },
+ imm: struct {
+ type: std.builtin.Type.Int,
+ multiple_of: comptime_int = 1,
+ max_valid: ?comptime_int = null,
+ },
+ extend: struct { size: aarch64.encoding.Register.IntegerSize },
+ shift: struct { allow_ror: bool = true },
+ barrier: struct { only_sy: bool = false },
+
+ fn Storage(comptime spec: SymbolSpec) type {
+ return switch (spec) {
+ .reg => aarch64.encoding.Register,
+ .imm => |imm| @Type(.{ .int = imm.type }),
+ .extend => Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ .shift => Instruction.DataProcessingRegister.Shift.Op,
+ .barrier => Instruction.BranchExceptionGeneratingSystem.Barriers.Option,
+ };
+ }
+
+ fn parse(comptime spec: SymbolSpec, token: []const u8) ?Storage(spec) {
+ const Result = Storage(spec);
+ switch (spec) {
+ .reg => |reg_spec| {
+ var buf: [token_buf_len]u8 = undefined;
+ const reg = Result.parse(std.ascii.lowerString(&buf, token[0..@min(token.len, buf.len)])) orelse {
+ log.debug("invalid register: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ };
+ if (reg.format.integer != reg_spec.format.integer) {
+ log.debug("invalid register size: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ }
+ if (reg.alias == if (reg_spec.allow_sp) .zr else .sp) {
+ log.debug("invalid register usage: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ }
+ return reg;
+ },
+ .imm => |imm_spec| {
+ const imm = std.fmt.parseInt(Result, token, 0) catch {
+ log.debug("invalid immediate: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ };
+ if (@rem(imm, imm_spec.multiple_of) != 0) {
+ log.debug("invalid immediate usage: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ }
+ if (imm_spec.max_valid) |max_valid| if (imm > max_valid) {
+ log.debug("out of range immediate: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ };
+ return imm;
+ },
+ .extend => |extend_spec| {
+ const Option = Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option;
+ var buf: [
+ max_len: {
+ var max_len = 0;
+ for (@typeInfo(Option).@"enum".fields) |field| max_len = @max(max_len, field.name.len);
+ break :max_len max_len;
+ } + 1
+ ]u8 = undefined;
+ const extend = std.meta.stringToEnum(Option, std.ascii.lowerString(
+ &buf,
+ token[0..@min(token.len, buf.len)],
+ )) orelse {
+ log.debug("invalid extend: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ };
+ if (extend.sf() != extend_spec.size) {
+ log.debug("invalid extend: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ }
+ return extend;
+ },
+ .shift => |shift_spec| {
+ const ShiftOp = Instruction.DataProcessingRegister.Shift.Op;
+ var buf: [
+ max_len: {
+ var max_len = 0;
+ for (@typeInfo(ShiftOp).@"enum".fields) |field| max_len = @max(max_len, field.name.len);
+ break :max_len max_len;
+ } + 1
+ ]u8 = undefined;
+ const shift = std.meta.stringToEnum(ShiftOp, std.ascii.lowerString(
+ &buf,
+ token[0..@min(token.len, buf.len)],
+ )) orelse {
+ log.debug("invalid shift: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ };
+ if (!shift_spec.allow_ror and shift == .ror) {
+ log.debug("invalid shift usage: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ }
+ return shift;
+ },
+ .barrier => |barrier_spec| {
+ const Option = Instruction.BranchExceptionGeneratingSystem.Barriers.Option;
+ var buf: [
+ max_len: {
+ var max_len = 0;
+ for (@typeInfo(Option).@"enum".fields) |field| max_len = @max(max_len, field.name.len);
+ break :max_len max_len;
+ } + 1
+ ]u8 = undefined;
+ const barrier = std.meta.stringToEnum(Option, std.ascii.lowerString(
+ &buf,
+ token[0..@min(token.len, buf.len)],
+ )) orelse {
+ log.debug("invalid barrier: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ };
+ if (barrier_spec.only_sy and barrier != .sy) {
+ log.debug("invalid barrier: \"{f}\"", .{std.zig.fmtString(token)});
+ return null;
+ }
+ return barrier;
+ },
+ }
+ }
+};
+
+test "add sub" {
+ var as: Assemble = .{
+ .source =
+ \\ add w0, w0, w1
+ \\ add w2, w3, w4
+ \\ add wsp, w5, w6
+ \\ add w7, wsp, w8
+ \\ add wsp, wsp, w9
+ \\ add w10, w10, wzr
+ \\ add w11, w12, wzr
+ \\ add wsp, w13, wzr
+ \\ add w14, wsp, wzr
+ \\ add wsp, wsp, wzr
+ \\
+ \\ add x0, x0, x1
+ \\ add x2, x3, x4
+ \\ add sp, x5, x6
+ \\ add x7, sp, x8
+ \\ add sp, sp, x9
+ \\ add x10, x10, xzr
+ \\ add x11, x12, xzr
+ \\ add sp, x13, xzr
+ \\ add x14, sp, xzr
+ \\ add sp, sp, xzr
+ \\
+ \\ add w0, w0, w1
+ \\ add w2, w3, w4, uxtb #0
+ \\ add wsp, w5, w6, uxth #1
+ \\ add w7, wsp, w8, uxtw #0
+ \\ add wsp, wsp, w9, uxtw #2
+ \\ add w10, w10, wzr, uxtw #3
+ \\ add w11, w12, wzr, sxtb #4
+ \\ add wsp, w13, wzr, sxth #0
+ \\ add w14, wsp, wzr, sxtw #1
+ \\ add wsp, wsp, wzr, sxtw #2
+ \\
+ \\ add x0, x0, x1
+ \\ add x2, x3, w4, uxtb #0
+ \\ add sp, x5, w6, uxth #1
+ \\ add x7, sp, w8, uxtw #2
+ \\ add sp, sp, x9, uxtx #0
+ \\ add x10, x10, xzr, uxtx #3
+ \\ add x11, x12, wzr, sxtb #4
+ \\ add sp, x13, wzr, sxth #0
+ \\ add x14, sp, wzr, sxtw #1
+ \\ add sp, sp, xzr, sxtx #2
+ \\
+ \\ add w0, w0, #0
+ \\ add w0, w1, #1, lsl #0
+ \\ add wsp, w2, #2, lsl #12
+ \\ add w3, wsp, #3, lsl #0
+ \\ add wsp, wsp, #4095, lsl #12
+ \\ add w0, w1, #0
+ \\ add w2, w3, #0, lsl #0
+ \\ add w4, wsp, #0
+ \\ add w5, wsp, #0, lsl #0
+ \\ add wsp, w6, #0
+ \\ add wsp, w7, #0, lsl #0
+ \\ add wsp, wsp, #0
+ \\ add wsp, wsp, #0, lsl #0
+ \\
+ \\ add x0, x0, #0
+ \\ add x0, x1, #1, lsl #0
+ \\ add sp, x2, #2, lsl #12
+ \\ add x3, sp, #3, lsl #0
+ \\ add sp, sp, #4095, lsl #12
+ \\ add x0, x1, #0
+ \\ add x2, x3, #0, lsl #0
+ \\ add x4, sp, #0
+ \\ add x5, sp, #0, lsl #0
+ \\ add sp, x6, #0
+ \\ add sp, x7, #0, lsl #0
+ \\ add sp, sp, #0
+ \\ add sp, sp, #0, lsl #0
+ \\
+ \\ add w0, w0, w0
+ \\ add w1, w1, w2, lsl #0
+ \\ add w3, w4, w5, lsl #1
+ \\ add w6, w6, wzr, lsl #31
+ \\ add w7, wzr, w8, lsr #0
+ \\ add w9, wzr, wzr, lsr #30
+ \\ add wzr, w10, w11, lsr #31
+ \\ add wzr, w12, wzr, asr #0x0
+ \\ add wzr, wzr, w13, asr #0x10
+ \\ add wzr, wzr, wzr, asr #0x1f
+ \\
+ \\ add x0, x0, x0
+ \\ add x1, x1, x2, lsl #0
+ \\ add x3, x4, x5, lsl #1
+ \\ add x6, x6, xzr, lsl #63
+ \\ add x7, xzr, x8, lsr #0
+ \\ add x9, xzr, xzr, lsr #62
+ \\ add xzr, x10, x11, lsr #63
+ \\ add xzr, x12, xzr, asr #0x0
+ \\ add xzr, xzr, x13, asr #0x1F
+ \\ add xzr, xzr, xzr, asr #0x3f
+ \\
+ \\ sub w0, w0, w1
+ \\ sub w2, w3, w4
+ \\ sub wsp, w5, w6
+ \\ sub w7, wsp, w8
+ \\ sub wsp, wsp, w9
+ \\ sub w10, w10, wzr
+ \\ sub w11, w12, wzr
+ \\ sub wsp, w13, wzr
+ \\ sub w14, wsp, wzr
+ \\ sub wsp, wsp, wzr
+ \\
+ \\ sub x0, x0, x1
+ \\ sub x2, x3, x4
+ \\ sub sp, x5, x6
+ \\ sub x7, sp, x8
+ \\ sub sp, sp, x9
+ \\ sub x10, x10, xzr
+ \\ sub x11, x12, xzr
+ \\ sub sp, x13, xzr
+ \\ sub x14, sp, xzr
+ \\ sub sp, sp, xzr
+ \\
+ \\ sub w0, w0, w1
+ \\ sub w2, w3, w4, uxtb #0
+ \\ sub wsp, w5, w6, uxth #1
+ \\ sub w7, wsp, w8, uxtw #0
+ \\ sub wsp, wsp, w9, uxtw #2
+ \\ sub w10, w10, wzr, uxtw #3
+ \\ sub w11, w12, wzr, sxtb #4
+ \\ sub wsp, w13, wzr, sxth #0
+ \\ sub w14, wsp, wzr, sxtw #1
+ \\ sub wsp, wsp, wzr, sxtw #2
+ \\
+ \\ sub x0, x0, x1
+ \\ sub x2, x3, w4, uxtb #0
+ \\ sub sp, x5, w6, uxth #1
+ \\ sub x7, sp, w8, uxtw #2
+ \\ sub sp, sp, x9, uxtx #0
+ \\ sub x10, x10, xzr, uxtx #3
+ \\ sub x11, x12, wzr, sxtb #4
+ \\ sub sp, x13, wzr, sxth #0
+ \\ sub x14, sp, wzr, sxtw #1
+ \\ sub sp, sp, xzr, sxtx #2
+ \\
+ \\ sub w0, w0, #0
+ \\ sub w0, w1, #1, lsl #0
+ \\ sub wsp, w2, #2, lsl #12
+ \\ sub w3, wsp, #3, lsl #0
+ \\ sub wsp, wsp, #4095, lsl #12
+ \\ sub w0, w1, #0
+ \\ sub w2, w3, #0, lsl #0
+ \\ sub w4, wsp, #0
+ \\ sub w5, wsp, #0, lsl #0
+ \\ sub wsp, w6, #0
+ \\ sub wsp, w7, #0, lsl #0
+ \\ sub wsp, wsp, #0
+ \\ sub wsp, wsp, #0, lsl #0
+ \\
+ \\ sub x0, x0, #0
+ \\ sub x0, x1, #1, lsl #0
+ \\ sub sp, x2, #2, lsl #12
+ \\ sub x3, sp, #3, lsl #0
+ \\ sub sp, sp, #4095, lsl #12
+ \\ sub x0, x1, #0
+ \\ sub x2, x3, #0, lsl #0
+ \\ sub x4, sp, #0
+ \\ sub x5, sp, #0, lsl #0
+ \\ sub sp, x6, #0
+ \\ sub sp, x7, #0, lsl #0
+ \\ sub sp, sp, #0
+ \\ sub sp, sp, #0, lsl #0
+ \\
+ \\ sub w0, w0, w0
+ \\ sub w1, w1, w2, lsl #0
+ \\ sub w3, w4, w5, lsl #1
+ \\ sub w6, w6, wzr, lsl #31
+ \\ sub w7, wzr, w8, lsr #0
+ \\ sub w9, wzr, wzr, lsr #30
+ \\ sub wzr, w10, w11, lsr #31
+ \\ sub wzr, w12, wzr, asr #0x0
+ \\ sub wzr, wzr, w13, asr #0x10
+ \\ sub wzr, wzr, wzr, asr #0x1f
+ \\
+ \\ sub x0, x0, x0
+ \\ sub x1, x1, x2, lsl #0
+ \\ sub x3, x4, x5, lsl #1
+ \\ sub x6, x6, xzr, lsl #63
+ \\ sub x7, xzr, x8, lsr #0
+ \\ sub x9, xzr, xzr, lsr #62
+ \\ sub xzr, x10, x11, lsr #63
+ \\ sub xzr, x12, xzr, asr #0x0
+ \\ sub xzr, xzr, x13, asr #0x1F
+ \\ sub xzr, xzr, xzr, asr #0x3f
+ \\
+ \\ neg w0, w0
+ \\ neg w1, w2, lsl #0
+ \\ neg w3, wzr, lsl #7
+ \\ neg wzr, w4, lsr #14
+ \\ neg wzr, wzr, asr #21
+ \\
+ \\ neg x0, x0
+ \\ neg x1, x2, lsl #0
+ \\ neg x3, xzr, lsl #11
+ \\ neg xzr, x4, lsr #22
+ \\ neg xzr, xzr, asr #33
+ ,
+ .operands = .empty,
+ };
+
+ try std.testing.expectFmt("add w0, w0, w1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w2, w3, w4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, w5, w6", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w7, wsp, w8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, wsp, w9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w10, w10, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w11, w12, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, w13, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w14, wsp, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, wsp, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add x0, x0, x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x2, x3, x4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, x5, x6", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x7, sp, x8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, sp, x9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x10, x10, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x11, x12, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, x13, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x14, sp, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, sp, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add w0, w0, w1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w2, w3, w4, uxtb #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, w5, w6, uxth #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w7, wsp, w8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, wsp, w9, uxtw #2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w10, w10, wzr, uxtw #3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w11, w12, wzr, sxtb #4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, w13, wzr, sxth #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w14, wsp, wzr, sxtw #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, wsp, wzr, sxtw #2", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add x0, x0, x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x2, x3, w4, uxtb #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, x5, w6, uxth #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x7, sp, w8, uxtw #2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, sp, x9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x10, x10, xzr, uxtx #3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x11, x12, wzr, sxtb #4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, x13, wzr, sxth #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x14, sp, wzr, sxtw #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, sp, xzr, sxtx #2", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add w0, w0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w0, w1, #0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, w2, #0x2, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w3, wsp, #0x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wsp, wsp, #0xfff, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w0, w1, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w2, w3, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w4, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w5, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, w6", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, w7", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, wsp", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add x0, x0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x0, x1, #0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, x2, #0x2, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x3, sp, #0x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add sp, sp, #0xfff, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x0, x1, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x2, x3, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, sp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x5, sp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, x6", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, x7", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, sp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, sp", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("add x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("add xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub w0, w0, w1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w2, w3, w4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w5, w6", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w7, wsp, w8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, w9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w10, w10, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w11, w12, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w13, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w14, wsp, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub x0, x0, x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x2, x3, x4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x5, x6", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x7, sp, x8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, x9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x10, x10, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x11, x12, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x13, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x14, sp, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub w0, w0, w1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w2, w3, w4, uxtb #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w5, w6, uxth #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w7, wsp, w8", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, w9, uxtw #2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w10, w10, wzr, uxtw #3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w11, w12, wzr, sxtb #4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w13, wzr, sxth #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w14, wsp, wzr, sxtw #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, wzr, sxtw #2", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub x0, x0, x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x2, x3, w4, uxtb #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x5, w6, uxth #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x7, sp, w8, uxtw #2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, x9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x10, x10, xzr, uxtx #3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x11, x12, wzr, sxtb #4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x13, wzr, sxth #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x14, sp, wzr, sxtw #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, xzr, sxtx #2", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub w0, w0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w0, w1, #0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w2, #0x2, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w3, wsp, #0x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, #0xfff, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w0, w1, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w2, w3, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w4, wsp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w5, wsp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w6, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, w7, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wsp, wsp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub x0, x0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x0, x1, #0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x2, #0x2, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x3, sp, #0x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, #0xfff, lsl #12", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x0, x1, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x2, x3, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x4, sp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x5, sp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x6, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, x7, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub sp, sp, #0x0", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg w7, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg w9, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("sub x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg x7, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg x9, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("sub xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("neg w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg w3, wzr, lsl #7", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg wzr, w4, lsr #14", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg wzr, wzr, asr #21", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("neg x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg x3, xzr, lsl #11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg xzr, x4, lsr #22", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("neg xzr, xzr, asr #33", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expect(null == try as.nextInstruction());
+}
+test "bitfield" {
+    // Feed every bitfield-move form (sbfm/bfm/ubfm, 32- and 64-bit) through the
+    // assembler and check each one round-trips to its canonical spelling.
+    var assembler: Assemble = .{
+        .source =
+        \\sbfm w0, w0, #0, #31
+        \\sbfm w0, w0, #31, #0
+        \\
+        \\sbfm x0, x0, #0, #63
+        \\sbfm x0, x0, #63, #0
+        \\
+        \\bfm w0, w0, #0, #31
+        \\bfm w0, w0, #31, #0
+        \\
+        \\bfm x0, x0, #0, #63
+        \\bfm x0, x0, #63, #0
+        \\
+        \\ubfm w0, w0, #0, #31
+        \\ubfm w0, w0, #31, #0
+        \\
+        \\ubfm x0, x0, #0, #63
+        \\ubfm x0, x0, #63, #0
+        ,
+        .operands = .empty,
+    };
+
+    // Expected disassembly, in source order.
+    const expected = [_][]const u8{
+        "sbfm w0, w0, #0, #31",
+        "sbfm w0, w0, #31, #0",
+        "sbfm x0, x0, #0, #63",
+        "sbfm x0, x0, #63, #0",
+        "bfm w0, w0, #0, #31",
+        "bfm w0, w0, #31, #0",
+        "bfm x0, x0, #0, #63",
+        "bfm x0, x0, #63, #0",
+        "ubfm w0, w0, #0, #31",
+        "ubfm w0, w0, #31, #0",
+        "ubfm x0, x0, #0, #63",
+        "ubfm x0, x0, #63, #0",
+    };
+    for (expected) |want| {
+        try std.testing.expectFmt(want, "{f}", .{(try assembler.nextInstruction()).?});
+    }
+
+    // The instruction stream must be fully consumed.
+    try std.testing.expect(null == try assembler.nextInstruction());
+}
+test "branch register" {
+    // Register-indirect branches: `ret` with the default (x30) operand is
+    // printed bare, while any other register operand is printed explicitly.
+    var assembler: Assemble = .{
+        .source =
+        \\ret
+        \\br x30
+        \\blr x30
+        \\ret x30
+        \\br x29
+        \\blr x29
+        \\ret x29
+        \\br x2
+        \\blr x1
+        \\ret x0
+        ,
+        .operands = .empty,
+    };
+
+    // Expected disassembly, in source order; note both `ret` and `ret x30`
+    // collapse to the bare `ret` spelling.
+    const expected = [_][]const u8{
+        "ret",
+        "br x30",
+        "blr x30",
+        "ret",
+        "br x29",
+        "blr x29",
+        "ret x29",
+        "br x2",
+        "blr x1",
+        "ret x0",
+    };
+    for (expected) |want| {
+        try std.testing.expectFmt(want, "{f}", .{(try assembler.nextInstruction()).?});
+    }
+
+    // The instruction stream must be fully consumed.
+    try std.testing.expect(null == try assembler.nextInstruction());
+}
+test "exception generating" {
+    // Exception-generating instructions: mnemonics are case-insensitive on
+    // input, immediates accept decimal/hex/octal/binary, and the printer
+    // lowercases mnemonics and normalizes nonzero immediates to hex.
+    var assembler: Assemble = .{
+        .source =
+        \\SVC #0
+        \\HVC #0x1
+        \\SMC #0o15
+        \\BRK #42
+        \\HLT #0x42
+        \\TCANCEL #123
+        \\DCPS1 #1234
+        \\DCPS2 #12345
+        \\DCPS3 #65535
+        \\DCPS3 #0x0
+        \\DCPS2 #0
+        \\DCPS1
+        ,
+        .operands = .empty,
+    };
+
+    // Expected disassembly, in source order; a zero immediate on dcps1/2/3 is
+    // omitted entirely.
+    const expected = [_][]const u8{
+        "svc #0",
+        "hvc #0x1",
+        "smc #0xd",
+        "brk #0x2a",
+        "hlt #0x42",
+        "tcancel #0x7b",
+        "dcps1 #0x4d2",
+        "dcps2 #0x3039",
+        "dcps3 #0xffff",
+        "dcps3",
+        "dcps2",
+        "dcps1",
+    };
+    for (expected) |want| {
+        try std.testing.expectFmt(want, "{f}", .{(try assembler.nextInstruction()).?});
+    }
+
+    // The instruction stream must be fully consumed.
+    try std.testing.expect(null == try assembler.nextInstruction());
+}
+test "extract" {
+    // `extr` (register extract): uppercase register names on input are
+    // lowercased on output; immediates span the full 0..size-1 range.
+    var assembler: Assemble = .{
+        .source =
+        \\extr W0, W1, W2, #0
+        \\extr W3, W3, W4, #1
+        \\extr W5, W5, W5, #31
+        \\
+        \\extr X0, X1, X2, #0
+        \\extr X3, X3, X4, #1
+        \\extr X5, X5, X5, #63
+        ,
+        .operands = .empty,
+    };
+
+    // Expected disassembly, in source order.
+    const expected = [_][]const u8{
+        "extr w0, w1, w2, #0",
+        "extr w3, w3, w4, #1",
+        "extr w5, w5, w5, #31",
+        "extr x0, x1, x2, #0",
+        "extr x3, x3, x4, #1",
+        "extr x5, x5, x5, #63",
+    };
+    for (expected) |want| {
+        try std.testing.expectFmt(want, "{f}", .{(try assembler.nextInstruction()).?});
+    }
+
+    // The instruction stream must be fully consumed.
+    try std.testing.expect(null == try assembler.nextInstruction());
+}
+test "hints" {
+    // Hint instructions: mnemonics are case-insensitive, and `hint #0..#5`
+    // print as their aliases (nop/yield/wfe/wfi/sev/sevl); an unallocated
+    // hint number keeps the raw `hint` spelling.
+    var assembler: Assemble = .{
+        .source =
+        \\NOP
+        \\hint #0
+        \\YiElD
+        \\Hint #0x1
+        \\WfE
+        \\hInt #02
+        \\wFi
+        \\hiNt #0b11
+        \\sEv
+        \\hinT #4
+        \\sevl
+        \\HINT #0b101
+        \\hint #0x7F
+        ,
+        .operands = .empty,
+    };
+
+    // Expected disassembly, in source order: each alias appears twice (once
+    // spelled directly, once as the equivalent `hint #n`).
+    const expected = [_][]const u8{
+        "nop",
+        "nop",
+        "yield",
+        "yield",
+        "wfe",
+        "wfe",
+        "wfi",
+        "wfi",
+        "sev",
+        "sev",
+        "sevl",
+        "sevl",
+        "hint #0x7f",
+    };
+    for (expected) |want| {
+        try std.testing.expectFmt(want, "{f}", .{(try assembler.nextInstruction()).?});
+    }
+
+    // The instruction stream must be fully consumed.
+    try std.testing.expect(null == try assembler.nextInstruction());
+}
+test "load store" {
+    // Load/store pair and single-register forms (ldp/ldr/stp/str, 32- and
+    // 64-bit) across all three addressing modes: post-index, pre-index, and
+    // signed/unsigned offset. The printer normalizes immediates to hex and
+    // omits a zero offset in the plain-offset form.
+    var assembler: Assemble = .{
+        .source =
+        \\ LDP w0, w1, [x2], #-256
+        \\ LDP w3, w4, [x5], #0
+        \\ LDP w6, w7, [sp], #252
+        \\ LDP w0, w1, [x2, #-0x100]!
+        \\ LDP w3, w4, [x5, #0]!
+        \\ LDP w6, w7, [sp, #0xfc]!
+        \\ LDP w0, w1, [x2, #-256]
+        \\ LDP w3, w4, [x5]
+        \\ LDP w6, w7, [x8, #0]
+        \\ LDP w9, w10, [sp, #252]
+        \\
+        \\ LDP x0, x1, [x2], #-512
+        \\ LDP x3, x4, [x5], #0
+        \\ LDP x6, x7, [sp], #504
+        \\ LDP x0, x1, [x2, #-0x200]!
+        \\ LDP x3, x4, [x5, #0]!
+        \\ LDP x6, x7, [sp, #0x1f8]!
+        \\ LDP x0, x1, [x2, #-512]
+        \\ LDP x3, x4, [x5]
+        \\ LDP x6, x7, [x8, #0]
+        \\ LDP x9, x10, [sp, #504]
+        \\
+        \\ LDR w0, [x1], #-256
+        \\ LDR w2, [x3], #0
+        \\ LDR w4, [sp], #255
+        \\ LDR w0, [x1, #-0x100]!
+        \\ LDR w2, [x3, #0]!
+        \\ LDR w4, [sp, #0xff]!
+        \\ LDR w0, [x1, #0]
+        \\ LDR w2, [x3]
+        \\ LDR w4, [sp, #16380]
+        \\
+        \\ LDR x0, [x1], #-256
+        \\ LDR x2, [x3], #0
+        \\ LDR x4, [sp], #255
+        \\ LDR x0, [x1, #-0x100]!
+        \\ LDR x2, [x3, #0]!
+        \\ LDR x4, [sp, #0xff]!
+        \\ LDR x0, [x1, #0]
+        \\ LDR x2, [x3]
+        \\ LDR x4, [sp, #32760]
+        \\
+        \\ STP w0, w1, [x2], #-256
+        \\ STP w3, w4, [x5], #0
+        \\ STP w6, w7, [sp], #252
+        \\ STP w0, w1, [x2, #-0x100]!
+        \\ STP w3, w4, [x5, #0]!
+        \\ STP w6, w7, [sp, #0xfc]!
+        \\ STP w0, w1, [x2, #-256]
+        \\ STP w3, w4, [x5]
+        \\ STP w6, w7, [x8, #0]
+        \\ STP w9, w10, [sp, #252]
+        \\
+        \\ STP x0, x1, [x2], #-512
+        \\ STP x3, x4, [x5], #0
+        \\ STP x6, x7, [sp], #504
+        \\ STP x0, x1, [x2, #-0x200]!
+        \\ STP x3, x4, [x5, #0]!
+        \\ STP x6, x7, [sp, #0x1f8]!
+        \\ STP x0, x1, [x2, #-512]
+        \\ STP x3, x4, [x5]
+        \\ STP x6, x7, [x8, #0]
+        \\ STP x9, x10, [sp, #504]
+        \\
+        \\ STR w0, [x1], #-256
+        \\ STR w2, [x3], #0
+        \\ STR w4, [sp], #255
+        \\ STR w0, [x1, #-0x100]!
+        \\ STR w2, [x3, #0]!
+        \\ STR w4, [sp, #0xff]!
+        \\ STR w0, [x1, #0]
+        \\ STR w2, [x3]
+        \\ STR w4, [sp, #16380]
+        \\
+        \\ STR x0, [x1], #-256
+        \\ STR x2, [x3], #0
+        \\ STR x4, [sp], #255
+        \\ STR x0, [x1, #-0x100]!
+        \\ STR x2, [x3, #0]!
+        \\ STR x4, [sp, #0xff]!
+        \\ STR x0, [x1, #0]
+        \\ STR x2, [x3]
+        \\ STR x4, [sp, #32760]
+        ,
+        .operands = .empty,
+    };
+
+    // Expected disassembly, in source order. Post- and pre-index keep an
+    // explicit (hex) immediate even when zero; the plain-offset form drops it.
+    const expected = [_][]const u8{
+        "ldp w0, w1, [x2], #-0x100",
+        "ldp w3, w4, [x5], #0x0",
+        "ldp w6, w7, [sp], #0xfc",
+        "ldp w0, w1, [x2, #-0x100]!",
+        "ldp w3, w4, [x5, #0x0]!",
+        "ldp w6, w7, [sp, #0xfc]!",
+        "ldp w0, w1, [x2, #-0x100]",
+        "ldp w3, w4, [x5]",
+        "ldp w6, w7, [x8]",
+        "ldp w9, w10, [sp, #0xfc]",
+
+        "ldp x0, x1, [x2], #-0x200",
+        "ldp x3, x4, [x5], #0x0",
+        "ldp x6, x7, [sp], #0x1f8",
+        "ldp x0, x1, [x2, #-0x200]!",
+        "ldp x3, x4, [x5, #0x0]!",
+        "ldp x6, x7, [sp, #0x1f8]!",
+        "ldp x0, x1, [x2, #-0x200]",
+        "ldp x3, x4, [x5]",
+        "ldp x6, x7, [x8]",
+        "ldp x9, x10, [sp, #0x1f8]",
+
+        "ldr w0, [x1], #-0x100",
+        "ldr w2, [x3], #0x0",
+        "ldr w4, [sp], #0xff",
+        "ldr w0, [x1, #-0x100]!",
+        "ldr w2, [x3, #0x0]!",
+        "ldr w4, [sp, #0xff]!",
+        "ldr w0, [x1]",
+        "ldr w2, [x3]",
+        "ldr w4, [sp, #0x3ffc]",
+
+        "ldr x0, [x1], #-0x100",
+        "ldr x2, [x3], #0x0",
+        "ldr x4, [sp], #0xff",
+        "ldr x0, [x1, #-0x100]!",
+        "ldr x2, [x3, #0x0]!",
+        "ldr x4, [sp, #0xff]!",
+        "ldr x0, [x1]",
+        "ldr x2, [x3]",
+        "ldr x4, [sp, #0x7ff8]",
+
+        "stp w0, w1, [x2], #-0x100",
+        "stp w3, w4, [x5], #0x0",
+        "stp w6, w7, [sp], #0xfc",
+        "stp w0, w1, [x2, #-0x100]!",
+        "stp w3, w4, [x5, #0x0]!",
+        "stp w6, w7, [sp, #0xfc]!",
+        "stp w0, w1, [x2, #-0x100]",
+        "stp w3, w4, [x5]",
+        "stp w6, w7, [x8]",
+        "stp w9, w10, [sp, #0xfc]",
+
+        "stp x0, x1, [x2], #-0x200",
+        "stp x3, x4, [x5], #0x0",
+        "stp x6, x7, [sp], #0x1f8",
+        "stp x0, x1, [x2, #-0x200]!",
+        "stp x3, x4, [x5, #0x0]!",
+        "stp x6, x7, [sp, #0x1f8]!",
+        "stp x0, x1, [x2, #-0x200]",
+        "stp x3, x4, [x5]",
+        "stp x6, x7, [x8]",
+        "stp x9, x10, [sp, #0x1f8]",
+
+        "str w0, [x1], #-0x100",
+        "str w2, [x3], #0x0",
+        "str w4, [sp], #0xff",
+        "str w0, [x1, #-0x100]!",
+        "str w2, [x3, #0x0]!",
+        "str w4, [sp, #0xff]!",
+        "str w0, [x1]",
+        "str w2, [x3]",
+        "str w4, [sp, #0x3ffc]",
+
+        "str x0, [x1], #-0x100",
+        "str x2, [x3], #0x0",
+        "str x4, [sp], #0xff",
+        "str x0, [x1, #-0x100]!",
+        "str x2, [x3, #0x0]!",
+        "str x4, [sp, #0xff]!",
+        "str x0, [x1]",
+        "str x2, [x3]",
+        "str x4, [sp, #0x7ff8]",
+    };
+    for (expected) |want| {
+        try std.testing.expectFmt(want, "{f}", .{(try assembler.nextInstruction()).?});
+    }
+
+    // The instruction stream must be fully consumed.
+    try std.testing.expect(null == try assembler.nextInstruction());
+}
+test "logical" {
+ var as: Assemble = .{
+ .source =
+ \\ and w0, w0, w0
+ \\ and w1, w1, w2, lsl #0
+ \\ and w3, w4, w5, lsl #1
+ \\ and w6, w6, wzr, lsl #31
+ \\ and w7, wzr, w8, lsr #0
+ \\ and w9, wzr, wzr, lsr #30
+ \\ and wzr, w10, w11, lsr #31
+ \\ and wzr, w12, wzr, asr #0x0
+ \\ and wzr, wzr, w13, asr #0x10
+ \\ and wzr, wzr, wzr, asr #0x1f
+ \\ and w0, w0, wzr
+ \\ and w1, w2, wzr, lsl #0
+ \\ and w3, wzr, w3
+ \\ and w4, wzr, w5, lsl #0
+ \\ and w6, wzr, wzr
+ \\ and w7, wzr, wzr, lsl #0
+ \\ and wzr, w8, wzr
+ \\ and wzr, w9, wzr, lsl #0
+ \\ and wzr, wzr, w10
+ \\ and wzr, wzr, w11, lsl #0
+ \\ and wzr, wzr, wzr
+ \\ and wzr, wzr, wzr, lsl #0
+ \\
+ \\ and x0, x0, x0
+ \\ and x1, x1, x2, lsl #0
+ \\ and x3, x4, x5, lsl #1
+ \\ and x6, x6, xzr, lsl #63
+ \\ and x7, xzr, x8, lsr #0
+ \\ and x9, xzr, xzr, lsr #62
+ \\ and xzr, x10, x11, lsr #63
+ \\ and xzr, x12, xzr, asr #0x0
+ \\ and xzr, xzr, x13, asr #0x1F
+ \\ and xzr, xzr, xzr, asr #0x3f
+ \\ and x0, x0, xzr
+ \\ and x1, x2, xzr, lsl #0
+ \\ and x3, xzr, x3
+ \\ and x4, xzr, x5, lsl #0
+ \\ and x6, xzr, xzr
+ \\ and x7, xzr, xzr, lsl #0
+ \\ and xzr, x8, xzr
+ \\ and xzr, x9, xzr, lsl #0
+ \\ and xzr, xzr, x10
+ \\ and xzr, xzr, x11, lsl #0
+ \\ and xzr, xzr, xzr
+ \\ and xzr, xzr, xzr, lsl #0
+ \\
+ \\ orr w0, w0, w0
+ \\ orr w1, w1, w2, lsl #0
+ \\ orr w3, w4, w5, lsl #1
+ \\ orr w6, w6, wzr, lsl #31
+ \\ orr w7, wzr, w8, lsr #0
+ \\ orr w9, wzr, wzr, lsr #30
+ \\ orr wzr, w10, w11, lsr #31
+ \\ orr wzr, w12, wzr, asr #0x0
+ \\ orr wzr, wzr, w13, asr #0x10
+ \\ orr wzr, wzr, wzr, asr #0x1f
+ \\ orr w0, w0, wzr
+ \\ orr w1, w2, wzr, lsl #0
+ \\ orr w3, wzr, w3
+ \\ orr w4, wzr, w5, lsl #0
+ \\ orr w6, wzr, wzr
+ \\ orr w7, wzr, wzr, lsl #0
+ \\ orr wzr, w8, wzr
+ \\ orr wzr, w9, wzr, lsl #0
+ \\ orr wzr, wzr, w10
+ \\ orr wzr, wzr, w11, lsl #0
+ \\ orr wzr, wzr, wzr
+ \\ orr wzr, wzr, wzr, lsl #0
+ \\
+ \\ orr x0, x0, x0
+ \\ orr x1, x1, x2, lsl #0
+ \\ orr x3, x4, x5, lsl #1
+ \\ orr x6, x6, xzr, lsl #63
+ \\ orr x7, xzr, x8, lsr #0
+ \\ orr x9, xzr, xzr, lsr #62
+ \\ orr xzr, x10, x11, lsr #63
+ \\ orr xzr, x12, xzr, asr #0x0
+ \\ orr xzr, xzr, x13, asr #0x1F
+ \\ orr xzr, xzr, xzr, asr #0x3f
+ \\ orr x0, x0, xzr
+ \\ orr x1, x2, xzr, lsl #0
+ \\ orr x3, xzr, x3
+ \\ orr x4, xzr, x5, lsl #0
+ \\ orr x6, xzr, xzr
+ \\ orr x7, xzr, xzr, lsl #0
+ \\ orr xzr, x8, xzr
+ \\ orr xzr, x9, xzr, lsl #0
+ \\ orr xzr, xzr, x10
+ \\ orr xzr, xzr, x11, lsl #0
+ \\ orr xzr, xzr, xzr
+ \\ orr xzr, xzr, xzr, lsl #0
+ \\
+ \\ eor w0, w0, w0
+ \\ eor w1, w1, w2, lsl #0
+ \\ eor w3, w4, w5, lsl #1
+ \\ eor w6, w6, wzr, lsl #31
+ \\ eor w7, wzr, w8, lsr #0
+ \\ eor w9, wzr, wzr, lsr #30
+ \\ eor wzr, w10, w11, lsr #31
+ \\ eor wzr, w12, wzr, asr #0x0
+ \\ eor wzr, wzr, w13, asr #0x10
+ \\ eor wzr, wzr, wzr, asr #0x1f
+ \\ eor w0, w0, wzr
+ \\ eor w1, w2, wzr, lsl #0
+ \\ eor w3, wzr, w3
+ \\ eor w4, wzr, w5, lsl #0
+ \\ eor w6, wzr, wzr
+ \\ eor w7, wzr, wzr, lsl #0
+ \\ eor wzr, w8, wzr
+ \\ eor wzr, w9, wzr, lsl #0
+ \\ eor wzr, wzr, w10
+ \\ eor wzr, wzr, w11, lsl #0
+ \\ eor wzr, wzr, wzr
+ \\ eor wzr, wzr, wzr, lsl #0
+ \\
+ \\ eor x0, x0, x0
+ \\ eor x1, x1, x2, lsl #0
+ \\ eor x3, x4, x5, lsl #1
+ \\ eor x6, x6, xzr, lsl #63
+ \\ eor x7, xzr, x8, lsr #0
+ \\ eor x9, xzr, xzr, lsr #62
+ \\ eor xzr, x10, x11, lsr #63
+ \\ eor xzr, x12, xzr, asr #0x0
+ \\ eor xzr, xzr, x13, asr #0x1F
+ \\ eor xzr, xzr, xzr, asr #0x3f
+ \\ eor x0, x0, xzr
+ \\ eor x1, x2, xzr, lsl #0
+ \\ eor x3, xzr, x3
+ \\ eor x4, xzr, x5, lsl #0
+ \\ eor x6, xzr, xzr
+ \\ eor x7, xzr, xzr, lsl #0
+ \\ eor xzr, x8, xzr
+ \\ eor xzr, x9, xzr, lsl #0
+ \\ eor xzr, xzr, x10
+ \\ eor xzr, xzr, x11, lsl #0
+ \\ eor xzr, xzr, xzr
+ \\ eor xzr, xzr, xzr, lsl #0
+ \\
+ \\ ands w0, w0, w0
+ \\ ands w1, w1, w2, lsl #0
+ \\ ands w3, w4, w5, lsl #1
+ \\ ands w6, w6, wzr, lsl #31
+ \\ ands w7, wzr, w8, lsr #0
+ \\ ands w9, wzr, wzr, lsr #30
+ \\ ands wzr, w10, w11, lsr #31
+ \\ ands wzr, w12, wzr, asr #0x0
+ \\ ands wzr, wzr, w13, asr #0x10
+ \\ ands wzr, wzr, wzr, asr #0x1f
+ \\ ands w0, w0, wzr
+ \\ ands w1, w2, wzr, lsl #0
+ \\ ands w3, wzr, w3
+ \\ ands w4, wzr, w5, lsl #0
+ \\ ands w6, wzr, wzr
+ \\ ands w7, wzr, wzr, lsl #0
+ \\ ands wzr, w8, wzr
+ \\ ands wzr, w9, wzr, lsl #0
+ \\ ands wzr, wzr, w10
+ \\ ands wzr, wzr, w11, lsl #0
+ \\ ands wzr, wzr, wzr
+ \\ ands wzr, wzr, wzr, lsl #0
+ \\
+ \\ ands x0, x0, x0
+ \\ ands x1, x1, x2, lsl #0
+ \\ ands x3, x4, x5, lsl #1
+ \\ ands x6, x6, xzr, lsl #63
+ \\ ands x7, xzr, x8, lsr #0
+ \\ ands x9, xzr, xzr, lsr #62
+ \\ ands xzr, x10, x11, lsr #63
+ \\ ands xzr, x12, xzr, asr #0x0
+ \\ ands xzr, xzr, x13, asr #0x1F
+ \\ ands xzr, xzr, xzr, asr #0x3f
+ \\ ands x0, x0, xzr
+ \\ ands x1, x2, xzr, lsl #0
+ \\ ands x3, xzr, x3
+ \\ ands x4, xzr, x5, lsl #0
+ \\ ands x6, xzr, xzr
+ \\ ands x7, xzr, xzr, lsl #0
+ \\ ands xzr, x8, xzr
+ \\ ands xzr, x9, xzr, lsl #0
+ \\ ands xzr, xzr, x10
+ \\ ands xzr, xzr, x11, lsl #0
+ \\ ands xzr, xzr, xzr
+ \\ ands xzr, xzr, xzr, lsl #0
+ ,
+ .operands = .empty,
+ };
+
+ try std.testing.expectFmt("and w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w3, wzr, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w4, wzr, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w6, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and w7, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("and x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x3, xzr, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x4, xzr, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x6, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and x7, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("and xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("orr w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w3, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w4, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w6, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w7, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr wzr, w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("orr x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x6, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x7, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("orr xzr, x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("eor w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w3, wzr, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w4, wzr, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w6, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor w7, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor wzr, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("eor x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x3, xzr, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x4, xzr, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x6, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor x7, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("eor xzr, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ands w0, w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w1, w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w3, w4, w5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w6, w6, wzr, lsl #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w7, wzr, w8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w9, wzr, wzr, lsr #30", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w10, w11, lsr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w12, wzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, w13, asr #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, wzr, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w0, w0, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w1, w2, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w3, wzr, w3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w4, wzr, w5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w6, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands w7, wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w8, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst w9, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, w10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, w11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expectFmt("ands x0, x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x1, x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x3, x4, x5, lsl #1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x6, x6, xzr, lsl #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x7, xzr, x8, lsr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x9, xzr, xzr, lsr #62", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x10, x11, lsr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x12, xzr, asr #0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, x13, asr #31", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, xzr, asr #63", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x0, x0, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x1, x2, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x3, xzr, x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x4, xzr, x5", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x6, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("ands x7, xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x8, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst x9, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("tst xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ try std.testing.expect(null == try as.nextInstruction());
+}
+// NOTE(review): this span is a unified-diff hunk; every line keeps its '+' marker.
+// End-to-end MOV-family coverage for Assemble plus instruction formatting:
+//   * MOV Rd, #imm       (alias of MOVZ / MOVN / ORR-immediate)
+//   * MOV involving SP   (alias of ADD #0) vs. register MOV (alias of ORR)
+//   * raw MOVK / MOVN / MOVZ with optional `lsl #hw` shifts, including the
+//     zero-register corner cases where the `mov` alias must be suppressed.
+test "mov" {
+ var as: Assemble = .{
+ .source =
+ \\MOV W0, #0
+ \\MOV WZR, #0xffff
+ \\
+ \\MOV X0, #0
+ \\MOV XZR, #0xffff
+ \\
+ \\MOV W0, WSP
+ \\MOV WSP, W1
+ \\MOV WSP, WSP
+ \\MOV X0, SP
+ \\MOV SP, X1
+ \\MOV SP, SP
+ \\
+ \\MOV W0, W0
+ \\MOV W1, W2
+ \\MOV W3, WZR
+ \\MOV WZR, W4
+ \\MOV WZR, WZR
+ \\MOV X0, X0
+ \\MOV X1, X2
+ \\MOV X3, XZR
+ \\MOV XZR, X4
+ \\MOV XZR, XZR
+ \\
+ \\MOVK W0, #0
+ \\MOVK W1, #1, lsl #0
+ \\MOVK W2, #2, lsl #16
+ \\MOVK X3, #3
+ \\MOVK X4, #4, lsl #0x00
+ \\MOVK X5, #5, lsl #0x10
+ \\MOVK X6, #6, lsl #0x20
+ \\MOVK X7, #7, lsl #0x30
+ \\
+ \\MOVN W0, #8
+ \\MOVN W1, #9, lsl #0
+ \\MOVN W2, #10, lsl #16
+ \\MOVN X3, #11
+ \\MOVN X4, #12, lsl #0x00
+ \\MOVN X5, #13, lsl #0x10
+ \\MOVN X6, #14, lsl #0x20
+ \\MOVN X7, #15, lsl #0x30
+ \\
+ \\MOVN WZR, #0, lsl #0
+ \\MOVN WZR, #0, lsl #16
+ \\MOVN XZR, #0, lsl #0
+ \\MOVN XZR, #0, lsl #16
+ \\MOVN XZR, #0, lsl #32
+ \\MOVN XZR, #0, lsl #48
+ \\
+ \\MOVN WZR, #0xffff, lsl #0
+ \\MOVN WZR, #0xffff, lsl #16
+ \\MOVN XZR, #0xffff, lsl #0
+ \\MOVN XZR, #0xffff, lsl #16
+ \\MOVN XZR, #0xffff, lsl #32
+ \\MOVN XZR, #0xffff, lsl #48
+ \\
+ \\MOVZ W0, #16
+ \\MOVZ W1, #17, lsl #0
+ \\MOVZ W2, #18, lsl #16
+ \\MOVZ X3, #19
+ \\MOVZ X4, #20, lsl #0x00
+ \\MOVZ X5, #21, lsl #0x10
+ \\MOVZ X6, #22, lsl #0x20
+ \\MOVZ X7, #23, lsl #0x30
+ \\
+ \\MOVZ WZR, #0, lsl #0
+ \\MOVZ WZR, #0, lsl #16
+ \\MOVZ XZR, #0, lsl #0
+ \\MOVZ XZR, #0, lsl #16
+ \\MOVZ XZR, #0, lsl #32
+ \\MOVZ XZR, #0, lsl #48
+ \\
+ \\MOVZ WZR, #0xffff, lsl #0
+ \\MOVZ WZR, #0xffff, lsl #16
+ \\MOVZ XZR, #0xffff, lsl #0
+ \\MOVZ XZR, #0xffff, lsl #16
+ \\MOVZ XZR, #0xffff, lsl #32
+ \\MOVZ XZR, #0xffff, lsl #48
+ ,
+ .operands = .empty,
+ };
+
+ // Immediate MOVs print canonically as `mov` with hex immediates.
+ try std.testing.expectFmt("mov w0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+
+ // Forms that involve SP/WSP (the ADD #0 alias) keep the sp operand spelling.
+ try std.testing.expectFmt("mov w0, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, w1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wsp, wsp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x0, sp", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov sp, sp", "{f}", .{(try as.nextInstruction()).?});
+
+ // Plain register-to-register MOV, including zero-register sources/destinations.
+ try std.testing.expectFmt("mov w0, w0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w1, w2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w3, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, w4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, wzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x0, x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x1, x2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, xzr", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, x4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, xzr", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVK never aliases to `mov`; an `lsl #0` shift is elided from the output.
+ try std.testing.expectFmt("movk w0, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk w1, #0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk w2, #0x2, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x3, #0x3", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x4, #0x4", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x5, #0x5, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x6, #0x6, lsl #32", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movk x7, #0x7, lsl #48", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVN with a nonzero 16-bit immediate disassembles as `mov` of the NOT-ed value.
+ try std.testing.expectFmt("mov w0, #-0x9", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w1, #-0xa", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w2, #-0xa0001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, #-0xc", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, #-0xd", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x5, #-0xd0001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x6, #-0xe00000001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x7, #-0xf000000000001", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVN #0: aliased to `mov #-0x1` only with an elided shift; shifted forms stay raw.
+ try std.testing.expectFmt("mov wzr, #-0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn wzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0x1", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn xzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn xzr, #0x0, lsl #32", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn xzr, #0x0, lsl #48", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVN #0xffff: 32-bit forms stay raw (imm16 == maxInt(u16)); 64-bit forms alias.
+ try std.testing.expectFmt("movn wzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movn wzr, #0xffff, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0x10000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0xffff0001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0xffff00000001", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffffffffffff", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVZ with a nonzero immediate always aliases to `mov` of the shifted value.
+ try std.testing.expectFmt("mov w0, #0x10", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w1, #0x11", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov w2, #0x120000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x3, #0x13", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x4, #0x14", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x5, #0x150000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x6, #0x1600000000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov x7, #0x17000000000000", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVZ #0: aliased only with an elided shift; shifted #0 forms stay raw.
+ try std.testing.expectFmt("mov wzr, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz wzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz xzr, #0x0, lsl #16", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz xzr, #0x0, lsl #32", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("movz xzr, #0x0, lsl #48", "{f}", .{(try as.nextInstruction()).?});
+
+ // MOVZ #0xffff always aliases; note the 32-bit lsl #16 case prints as negative.
+ try std.testing.expectFmt("mov wzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov wzr, #-0x10000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff0000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #0xffff00000000", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("mov xzr, #-0x1000000000000", "{f}", .{(try as.nextInstruction()).?});
+
+ // The assembler must report end-of-input after the last instruction.
+ try std.testing.expect(null == try as.nextInstruction());
+}
+// NOTE(review): this span is a unified-diff hunk; every line keeps its '+' marker.
+// Parser robustness for the reserved-encoding `udf` instruction: leading blank
+// lines, tab and space separators, a decimal immediate (01234 -> 0x4d2), and a
+// missing space before '#' ("udf#65535") must all be accepted, and the
+// immediate is always printed in hex.
+test "reserved" {
+ var as: Assemble = .{
+ .source = "\n\nudf #0x0\n\t\n\tudf\t#01234\n \nudf#65535",
+ .operands = .empty,
+ };
+
+ try std.testing.expectFmt("udf #0x0", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("udf #0x4d2", "{f}", .{(try as.nextInstruction()).?});
+ try std.testing.expectFmt("udf #0xffff", "{f}", .{(try as.nextInstruction()).?});
+
+ // No further instructions after the last one.
+ try std.testing.expect(null == try as.nextInstruction());
+}
+
+const aarch64 = @import("../aarch64.zig");
+const Assemble = @This();
+const assert = std.debug.assert;
+const Instruction = aarch64.encoding.Instruction;
+const instructions = @import("instructions.zon");
+const std = @import("std");
+const log = std.log.scoped(.@"asm");
src/codegen/aarch64/Disassemble.zig
@@ -0,0 +1,905 @@
+// Disassembler output options (fields of the Disassemble file-struct).
+// Rendered case for mnemonics, registers, and condition codes (see fmtCase uses below).
+case: Case = .lower,
+// Text emitted between a mnemonic and its first operand.
+mnemonic_operands_separator: []const u8 = " ",
+// Text emitted between successive operands.
+operands_separator: []const u8 = ", ",
+// When true, prefer alias mnemonics over raw forms where the encoding permits
+// (e.g. `mov` for ORR/MOVZ/MOVN, `tst` for ANDS with a zero-register destination).
+enable_aliases: bool = true,
+
+pub const Case = enum { lower, upper };
+
+pub fn printInstruction(dis: Disassemble, inst: Instruction, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ unallocated: switch (inst.decode()) {
+ .unallocated => break :unallocated,
+ .reserved => |reserved| switch (reserved.decode()) {
+ .unallocated => break :unallocated,
+ .udf => |udf| return writer.print("{f}{s}#0x{x}", .{
+ fmtCase(.udf, dis.case),
+ dis.mnemonic_operands_separator,
+ udf.imm16,
+ }),
+ },
+ .sme => {},
+ .sve => {},
+ .data_processing_immediate => |data_processing_immediate| switch (data_processing_immediate.decode()) {
+ .unallocated => break :unallocated,
+ .pc_relative_addressing => |pc_relative_addressing| {
+ const group = pc_relative_addressing.group;
+ const imm = (@as(i33, group.immhi) << 2 | @as(i33, group.immlo) << 0) + @as(i33, switch (group.op) {
+ .adr => Instruction.size,
+ .adrp => 0,
+ });
+ return writer.print("{f}{s}{f}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(.doubleword, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ switch (group.op) {
+ .adr => @abs(imm),
+ .adrp => @abs(imm) << 12,
+ },
+ });
+ },
+ .add_subtract_immediate => |add_subtract_immediate| {
+ const group = add_subtract_immediate.group;
+ const op = group.op;
+ const S = group.S;
+ const sf = group.sf;
+ const sh = group.sh;
+ const imm12 = group.imm12;
+ const Rn = group.Rn.decodeInteger(sf, .{ .sp = true });
+ const Rd = group.Rd.decodeInteger(sf, .{ .sp = !S });
+ const elide_shift = sh == .@"0";
+ if (dis.enable_aliases and op == .add and S == false and elide_shift and imm12 == 0 and
+ (Rn.alias == .sp or Rd.alias == .sp)) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(.mov, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{s}{f}{s}{f}{s}#0x{x}", .{
+ fmtCase(op, dis.case),
+ if (S) "s" else "",
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ imm12,
+ });
+ return if (!elide_shift) writer.print("{s}{f} #{s}", .{
+ dis.operands_separator,
+ fmtCase(.lsl, dis.case),
+ @tagName(sh),
+ });
+ },
+ .add_subtract_immediate_with_tags => {},
+ .logical_immediate => |logical_immediate| {
+ const decoded = logical_immediate.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = logical_immediate.group;
+ const sf = group.sf;
+ const decoded_imm = group.imm.decodeImmediate(sf);
+ const imm = switch (sf) {
+ .word => @as(i32, @bitCast(@as(u32, @intCast(decoded_imm)))),
+ .doubleword => @as(i64, @bitCast(decoded_imm)),
+ };
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{ .sp = decoded != .ands });
+ return if (dis.enable_aliases and decoded == .orr and Rn.alias == .zr and !group.imm.moveWidePreferred(sf)) writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
+ fmtCase(.mov, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ if (imm < 0) "-" else "",
+ @abs(imm),
+ }) else if (dis.enable_aliases and decoded == .ands and Rd.alias == .zr) writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
+ fmtCase(.tst, dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ if (imm < 0) "-" else "",
+ @abs(imm),
+ }) else writer.print("{f}{s}{f}{s}{f}{s}#0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ decoded_imm,
+ });
+ },
+ .move_wide_immediate => |move_wide_immediate| {
+ const decoded = move_wide_immediate.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = move_wide_immediate.group;
+ const sf = group.sf;
+ const hw = group.hw;
+ const imm16 = group.imm16;
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ const elide_shift = hw == .@"0";
+ if (dis.enable_aliases and switch (decoded) {
+ .unallocated => unreachable,
+ .movz => elide_shift or group.imm16 != 0,
+ .movn => (elide_shift or group.imm16 != 0) and switch (sf) {
+ .word => group.imm16 != std.math.maxInt(u16),
+ .doubleword => true,
+ },
+ .movk => false,
+ }) {
+ const decoded_imm = switch (sf) {
+ .word => @as(i32, @bitCast(@as(u32, group.imm16) << @intCast(hw.int()))),
+ .doubleword => @as(i64, @bitCast(@as(u64, group.imm16) << hw.int())),
+ };
+ const imm = switch (decoded) {
+ .unallocated => unreachable,
+ .movz => decoded_imm,
+ .movn => ~decoded_imm,
+ .movk => unreachable,
+ };
+ return writer.print("{f}{s}{f}{s}#{s}0x{x}", .{
+ fmtCase(.mov, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ if (imm < 0) "-" else "",
+ @abs(imm),
+ });
+ }
+ try writer.print("{f}{s}{f}{s}#0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ imm16,
+ });
+ return if (!elide_shift) writer.print("{s}{f} #{s}", .{
+ dis.operands_separator,
+ fmtCase(.lsl, dis.case),
+ @tagName(hw),
+ });
+ },
+ .bitfield => |bitfield| {
+ const decoded = bitfield.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = bitfield.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}{s}#{d}{s}#{d}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.imm.immr,
+ dis.operands_separator,
+ group.imm.imms,
+ });
+ },
+ .extract => |extract| {
+ const decoded = extract.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = extract.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}{s}{f}{s}#{d}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.imms,
+ });
+ },
+ },
+ .branch_exception_generating_system => |branch_exception_generating_system| switch (branch_exception_generating_system.decode()) {
+ .unallocated => break :unallocated,
+ .conditional_branch_immediate => |conditional_branch_immediate| {
+ const decoded = conditional_branch_immediate.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = conditional_branch_immediate.group;
+ const imm = @as(i21, group.imm19);
+ return writer.print("{f}.{f}{s}.{c}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ fmtCase(group.cond, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ .exception_generating => |exception_generating| {
+ const decoded = exception_generating.decode();
+ switch (decoded) {
+ .unallocated => break :unallocated,
+ .svc, .hvc, .smc, .brk, .hlt, .tcancel => {},
+ .dcps1, .dcps2, .dcps3 => switch (exception_generating.group.imm16) {
+ 0 => return writer.print("{f}", .{fmtCase(decoded, dis.case)}),
+ else => {},
+ },
+ }
+ return switch (exception_generating.group.imm16) {
+ 0 => writer.print("{f}{s}#0", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ }),
+ else => writer.print("{f}{s}#0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ exception_generating.group.imm16,
+ }),
+ };
+ },
+ .system_register_argument => {},
+ .hints => |hints| switch (hints.decode()) {
+ .hint => |hint| return writer.print("{f}{s}#0x{x}", .{
+ fmtCase(.hint, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(u7, hint.CRm) << 3 | @as(u7, hint.op2) << 0,
+ }),
+ else => |decoded| return writer.print("{f}", .{fmtCase(decoded, dis.case)}),
+ },
+ .barriers => {},
+ .pstate => {},
+ .system_result => {},
+ .system => {},
+ .system_register_move => {},
+ .unconditional_branch_register => |unconditional_branch_register| {
+ const decoded = unconditional_branch_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = unconditional_branch_register.group;
+ const Rn = group.Rn.decodeInteger(.doubleword, .{});
+ try writer.print("{f}", .{fmtCase(decoded, dis.case)});
+ return if (decoded != .ret or Rn.alias != .r30) try writer.print("{s}{f}", .{
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ });
+ },
+ .unconditional_branch_immediate => |unconditional_branch_immediate| {
+ const group = unconditional_branch_immediate.group;
+ const imm = @as(i28, group.imm26);
+ return writer.print("{f}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ .compare_branch_immediate => |compare_branch_immediate| {
+ const group = compare_branch_immediate.group;
+ const imm = @as(i21, group.imm19);
+ return writer.print("{f}{s}{f}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(group.sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ .test_branch_immediate => |test_branch_immediate| {
+ const group = test_branch_immediate.group;
+ const imm = @as(i16, group.imm14);
+ return writer.print("{f}{s}{f}{s}#0x{d}{s}.{c}0x{x}", .{
+ fmtCase(group.op, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(@enumFromInt(group.b5), .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ @as(u6, group.b5) << 5 |
+ @as(u6, group.b40) << 0,
+ dis.operands_separator,
+ @as(u8, if (imm < 0) '-' else '+'),
+ @abs(imm) << 2,
+ });
+ },
+ },
+ .load_store => |load_store| switch (load_store.decode()) {
+ .unallocated => break :unallocated,
+ .register_literal => {},
+ .memory => {},
+ .no_allocate_pair_offset => {},
+ .register_pair_post_indexed => |register_pair_post_indexed| switch (register_pair_post_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = integer.group;
+ const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
+ });
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ const vs = group.opc.decode();
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
+ });
+ },
+ },
+ .register_pair_offset => |register_pair_offset| switch (register_pair_offset.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = integer.group;
+ const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
+ try writer.print("{f}{s}{f}{s}{f}{s}[{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ });
+ if (group.imm7 != 0) try writer.print("{s}#{s}0x{x}", .{
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
+ });
+ return writer.writeByte(']');
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ const vs = group.opc.decode();
+ try writer.print("{f}{s}{f}{s}{f}{s}[{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ });
+ if (group.imm7 != 0) try writer.print("{s}#{s}0x{x}", .{
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
+ });
+ return writer.writeByte(']');
+ },
+ },
+ .register_pair_pre_indexed => |register_pair_pre_indexed| switch (register_pair_pre_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = integer.group;
+ const sf: aarch64.encoding.Register.IntegerSize = @enumFromInt(group.opc >> 1);
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u10, @abs(group.imm7)) << (@as(u2, 2) + @intFromEnum(sf)),
+ });
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ const vs = group.opc.decode();
+ return writer.print("{f}{s}{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rt2.decodeVector(vs).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm7 < 0) "-" else "",
+ @as(u11, @abs(group.imm7)) << (@as(u3, 2) + @intFromEnum(vs)),
+ });
+ },
+ },
+ .register_unscaled_immediate => {},
+ .register_immediate_post_indexed => |register_immediate_post_indexed| switch (register_immediate_post_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated => break :unallocated,
+ .strb, .ldrb, .strh, .ldrh => .word,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ return writer.print("{f}{s}{f}{s}[{f}]{s}#{s}0x{x}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm9 < 0) "-" else "",
+ @abs(group.imm9),
+ });
+ },
+ .vector => {},
+ },
+ .register_unprivileged => {},
+ .register_immediate_pre_indexed => |register_immediate_pre_indexed| switch (register_immediate_pre_indexed.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated => break :unallocated,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .strb, .ldrb, .strh, .ldrh => .word,
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ return writer.print("{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm9 < 0) "-" else "",
+ @abs(group.imm9),
+ });
+ },
+ .vector => |vector| {
+ const decoded = vector.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = vector.group;
+ return writer.print("{f}{s}{f}{s}[{f}{s}#{s}0x{x}]!", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeVector(group.opc1.decode(group.size)).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ if (group.imm9 < 0) "-" else "",
+ @abs(group.imm9),
+ });
+ },
+ },
+ .register_register_offset => |register_register_offset| switch (register_register_offset.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated, .prfm => break :unallocated,
+ .strb, .ldrb, .strh, .ldrh => .word,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ try writer.print("{f}{s}{f}{s}[{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(group.option.sf(), .{}).fmtCase(dis.case),
+ });
+ if (group.option != .lsl or group.S) {
+ try writer.print("{s}{f}", .{
+ dis.operands_separator,
+ fmtCase(group.option, dis.case),
+ });
+ if (group.S) try writer.print(" #{d}", .{
+ @intFromEnum(group.size),
+ });
+ }
+ return writer.writeByte(']');
+ },
+ .vector => {},
+ },
+ .register_unsigned_immediate => |register_unsigned_immediate| switch (register_unsigned_immediate.decode()) {
+ .integer => |integer| {
+ const decoded = integer.decode();
+ const sf: aarch64.encoding.Register.IntegerSize = switch (decoded) {
+ .unallocated, .prfm => break :unallocated,
+ .strb, .ldrb, .strh, .ldrh => .word,
+ inline .ldrsb, .ldrsh => |encoded| switch (encoded.opc0) {
+ 0b0 => .doubleword,
+ 0b1 => .word,
+ },
+ .ldrsw => .doubleword,
+ inline .str, .ldr => |encoded| encoded.sf,
+ };
+ const group = integer.group;
+ try writer.print("{f}{s}{f}{s}[{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rt.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(.doubleword, .{ .sp = true }).fmtCase(dis.case),
+ });
+ if (group.imm12 > 0) try writer.print("{s}#0x{x}", .{
+ dis.operands_separator,
+ @as(u15, group.imm12) << @intFromEnum(group.size),
+ });
+ return writer.writeByte(']');
+ },
+ .vector => {},
+ },
+ },
+ .data_processing_register => |data_processing_register| switch (data_processing_register.decode()) {
+ .unallocated => break :unallocated,
+ .data_processing_two_source => |data_processing_two_source| {
+ const decoded = data_processing_two_source.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = data_processing_two_source.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
+ });
+ },
+ .data_processing_one_source => |data_processing_one_source| {
+ const decoded = data_processing_one_source.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = data_processing_one_source.group;
+ const sf = group.sf;
+ return writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ });
+ },
+ .logical_shifted_register => |logical_shifted_register| {
+ const decoded = logical_shifted_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = logical_shifted_register.group;
+ const sf = group.sf;
+ const shift = group.shift;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const amount = group.imm6;
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ const elide_shift = shift == .lsl and amount == 0;
+ if (dis.enable_aliases and switch (decoded) {
+ else => false,
+ .orr => elide_shift,
+ .orn => true,
+ } and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { mov, mvn }, switch (decoded) {
+ else => unreachable,
+ .orr => .mov,
+ .orn => .mvn,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else if (dis.enable_aliases and decoded == .ands and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(.tst, dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ return if (!elide_shift) writer.print("{s}{f} #{d}", .{
+ dis.operands_separator,
+ fmtCase(shift, dis.case),
+ amount,
+ });
+ },
+ .add_subtract_shifted_register => |add_subtract_shifted_register| {
+ const decoded = add_subtract_shifted_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = add_subtract_shifted_register.group;
+ const sf = group.sf;
+ const shift = group.shift;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const imm6 = group.imm6;
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ if (dis.enable_aliases and group.S and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cmn, cmp }, switch (group.op) {
+ .add => .cmn,
+ .sub => .cmp,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else if (dis.enable_aliases and group.op == .sub and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { neg, negs }, switch (group.S) {
+ false => .neg,
+ true => .negs,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ return if (shift != .lsl or imm6 != 0) return writer.print("{s}{f} #{d}", .{
+ dis.operands_separator,
+ fmtCase(shift, dis.case),
+ imm6,
+ });
+ },
+ .add_subtract_extended_register => |add_subtract_extended_register| {
+ const decoded = add_subtract_extended_register.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = add_subtract_extended_register.group;
+ const sf = group.sf;
+ const Rm = group.Rm.decodeInteger(group.option.sf(), .{});
+ const Rn = group.Rn.decodeInteger(sf, .{ .sp = true });
+ const Rd = group.Rd.decodeInteger(sf, .{ .sp = true });
+ if (dis.enable_aliases and group.S and Rd.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cmn, cmp }, switch (group.op) {
+ .add => .cmn,
+ .sub => .cmp,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ return if (group.option != @as(Instruction.DataProcessingRegister.AddSubtractExtendedRegister.Option, switch (sf) {
+ .word => .uxtw,
+ .doubleword => .uxtx,
+ }) or group.imm3 != 0) writer.print("{s}{f} #{d}", .{
+ dis.operands_separator,
+ fmtCase(group.option, dis.case),
+ group.imm3,
+ });
+ },
+ .add_subtract_with_carry => |add_subtract_with_carry| {
+ const decoded = add_subtract_with_carry.decode();
+ const group = add_subtract_with_carry.group;
+ const sf = group.sf;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ return if (dis.enable_aliases and group.op == .sbc and Rn.alias == .zr) try writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { ngc, ngcs }, switch (group.S) {
+ false => .ngc,
+ true => .ngcs,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ }) else try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ });
+ },
+ .rotate_right_into_flags => {},
+ .evaluate_into_flags => {},
+ .conditional_compare_register => {},
+ .conditional_compare_immediate => {},
+ .conditional_select => |conditional_select| {
+ const decoded = conditional_select.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = conditional_select.group;
+ const sf = group.sf;
+ const Rm = group.Rm.decodeInteger(sf, .{});
+ const cond = group.cond;
+ const Rn = group.Rn.decodeInteger(sf, .{});
+ const Rd = group.Rd.decodeInteger(sf, .{});
+ return if (dis.enable_aliases and group.op != group.op2 and Rm.alias == .zr and cond != .al and cond != .nv and Rn.alias == Rm.alias) writer.print("{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cset, csetm }, switch (decoded) {
+ else => unreachable,
+ .csinc => .cset,
+ .csinv => .csetm,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ fmtCase(cond.invert(), dis.case),
+ }) else if (dis.enable_aliases and decoded != .csel and cond != .al and cond != .nv and Rn.alias == Rm.alias) writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(@as(enum { cinc, cinv, cneg }, switch (decoded) {
+ else => unreachable,
+ .csinc => .cinc,
+ .csinv => .cinv,
+ .csneg => .cneg,
+ }), dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ fmtCase(cond.invert(), dis.case),
+ }) else writer.print("{f}{s}{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ Rd.fmtCase(dis.case),
+ dis.operands_separator,
+ Rn.fmtCase(dis.case),
+ dis.operands_separator,
+ Rm.fmtCase(dis.case),
+ dis.operands_separator,
+ fmtCase(cond, dis.case),
+ });
+ },
+ .data_processing_three_source => |data_processing_three_source| {
+ const decoded = data_processing_three_source.decode();
+ if (decoded == .unallocated) break :unallocated;
+ const group = data_processing_three_source.group;
+ const sf = group.sf;
+ try writer.print("{f}{s}{f}{s}{f}{s}{f}", .{
+ fmtCase(decoded, dis.case),
+ dis.mnemonic_operands_separator,
+ group.Rd.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rn.decodeInteger(sf, .{}).fmtCase(dis.case),
+ dis.operands_separator,
+ group.Rm.decodeInteger(sf, .{}).fmtCase(dis.case),
+ });
+ return switch (decoded) {
+ .unallocated => unreachable,
+ .madd, .msub, .smaddl, .smsubl, .umaddl, .umsubl => writer.print("{s}{f}", .{
+ dis.operands_separator,
+ group.Ra.decodeInteger(sf, .{}).fmtCase(dis.case),
+ }),
+ .smulh, .umulh => {},
+ };
+ },
+ },
+ .data_processing_vector => {},
+ }
+ return writer.print(".{f}{s}0x{x:0>8}", .{
+ fmtCase(.word, dis.case),
+ dis.mnemonic_operands_separator,
+ @as(Instruction.Backing, @bitCast(inst)),
+ });
+}
+
+/// Returns an ad-hoc formatter whose `format` method writes `@tagName(tag)`
+/// with every ASCII byte forced to the requested letter case. `tag` must be a
+/// value `@tagName` accepts (an enum or tagged-union value).
+fn fmtCase(tag: anytype, case: Case) struct {
+ tag: []const u8,
+ case: Case,
+ /// Emits the captured tag name one byte at a time in the configured case.
+ pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ for (data.tag) |c| try writer.writeByte(switch (data.case) {
+ .lower => std.ascii.toLower(c),
+ .upper => std.ascii.toUpper(c),
+ });
+ }
+} {
+ return .{ .tag = @tagName(tag), .case = case };
+}
+
+/// `std.Io.Writer` formatter that renders an `aarch64.encoding.Register`
+/// according to its `format` variant, using `case` for letter casing.
+pub const RegisterFormatter = struct {
+ reg: aarch64.encoding.Register,
+ case: Case,
+ pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ switch (data.reg.format) {
+ // Bare alias name, e.g. "r0" or "sp".
+ .alias => try writer.print("{f}", .{fmtCase(data.reg.alias, data.case)}),
+ .integer => |size| switch (data.reg.alias) {
+ .r0,
+ .r1,
+ .r2,
+ .r3,
+ .r4,
+ .r5,
+ .r6,
+ .r7,
+ .r8,
+ .r9,
+ .r10,
+ .r11,
+ .r12,
+ .r13,
+ .r14,
+ .r15,
+ .r16,
+ .r17,
+ .r18,
+ .r19,
+ .r20,
+ .r21,
+ .r22,
+ .r23,
+ .r24,
+ .r25,
+ .r26,
+ .r27,
+ .r28,
+ .r29,
+ .r30,
+ // Numbered GPRs print as the width prefix ('w'/'x') plus the
+ // register's encoded number, e.g. "w3" or "x29".
+ => |alias| try writer.print("{c}{d}", .{
+ size.prefix(),
+ @intFromEnum(alias.encode(.{})),
+ }),
+ // The zero register keeps its name after the prefix: "wzr"/"xzr".
+ .zr => try writer.print("{c}{f}", .{
+ size.prefix(),
+ fmtCase(data.reg.alias, data.case),
+ }),
+ // Remaining aliases only take a "w" prefix in word size,
+ // e.g. "wsp" vs plain "sp" for doubleword.
+ else => try writer.print("{s}{f}", .{
+ switch (size) {
+ .word => "w",
+ .doubleword => "",
+ },
+ fmtCase(data.reg.alias, data.case),
+ }),
+ },
+ // Scalar SIMD&FP view: size prefix plus vector register number,
+ // e.g. "d8" or "q31".
+ .scalar => |size| try writer.print("{c}{d}", .{
+ size.prefix(),
+ @intFromEnum(data.reg.alias.encode(.{ .V = true })),
+ }),
+ // Full vector view: alias name, dot, arrangement (e.g. ".16b").
+ .vector => |arrangement| try writer.print("{f}.{f}", .{
+ fmtCase(data.reg.alias, data.case),
+ fmtCase(arrangement, data.case),
+ }),
+ // Single-lane view: alias name, dot, element size char, lane index.
+ .element => |element| try writer.print("{f}.{c}[{d}]", .{
+ fmtCase(data.reg.alias, data.case),
+ element.size.prefix(),
+ element.index,
+ }),
+ }
+ }
+};
+
+const aarch64 = @import("../aarch64.zig");
+const Disassemble = @This();
+const Instruction = aarch64.encoding.Instruction;
+const std = @import("std");
src/codegen/aarch64/encoding.zig
@@ -0,0 +1,11799 @@
+/// B1.2 Registers in AArch64 Execution state
+pub const Register = struct {
+ /// Which architectural register this value refers to.
+ alias: Alias,
+ /// Which view of the register is meant and how it should be printed.
+ format: Format,
+
+ /// How a `Register` should be rendered / which view of it is meant.
+ pub const Format = union(enum) {
+ /// Print the bare alias name (e.g. "r0", "sp").
+ alias,
+ /// General-purpose integer view of the given width ("w"/"x" registers).
+ integer: IntegerSize,
+ /// Scalar SIMD&FP view ("b"/"h"/"s"/"d"/"q" registers).
+ scalar: VectorSize,
+ /// Whole-vector view with a lane arrangement (e.g. ".16b").
+ vector: Arrangement,
+ /// A single vector lane: element size plus lane index.
+ element: struct { size: VectorSize, index: u4 },
+ };
+
+ /// Operand width of a general-purpose register. The backing value
+ /// matches the instruction `sf` bit (0 = 32-bit word, 1 = 64-bit
+ /// doubleword).
+ pub const IntegerSize = enum(u1) {
+ word = 0b0,
+ doubleword = 0b1,
+
+ /// Assembly name prefix for this width: 'w' for word, 'x' for doubleword.
+ pub fn prefix(is: IntegerSize) u8 {
+ return (comptime std.enums.EnumArray(IntegerSize, u8).init(.{
+ .word = 'w',
+ .doubleword = 'x',
+ })).get(is);
+ }
+ };
+
+ /// Size of a SIMD&FP scalar register or vector element. Values 0-4
+ /// are log2 of the size in bytes (byte = 1B ... quad = 16B);
+ /// `scalable` and `predicate` name the SVE 'z'/'p' register spaces.
+ pub const VectorSize = enum(u3) {
+ byte = 0,
+ half = 1,
+ single = 2,
+ double = 3,
+ quad = 4,
+ scalable,
+ predicate,
+
+ /// Assembly name prefix for this size (e.g. 'd' for double, 'q' for quad).
+ pub fn prefix(vs: VectorSize) u8 {
+ return (comptime std.enums.EnumArray(VectorSize, u8).init(.{
+ .byte = 'b',
+ .half = 'h',
+ .single = 's',
+ .double = 'd',
+ .quad = 'q',
+ .scalable = 'z',
+ .predicate = 'p',
+ })).get(vs);
+ }
+ };
+
+ /// SIMD vector lane arrangement, e.g. ".16b" = sixteen byte lanes.
+ /// The first four are 128-bit (quad) arrangements; the last four are
+ /// 64-bit (double) arrangements.
+ pub const Arrangement = enum {
+ @"2d",
+ @"4s",
+ @"8h",
+ @"16b",
+
+ @"1d",
+ @"2s",
+ @"4h",
+ @"8b",
+
+ /// Number of lanes in the arrangement.
+ pub fn len(arrangement: Arrangement) u5 {
+ return switch (arrangement) {
+ .@"1d" => 1,
+ .@"2d", .@"2s" => 2,
+ .@"4s", .@"4h" => 4,
+ .@"8h", .@"8b" => 8,
+ .@"16b" => 16,
+ };
+ }
+
+ /// Total vector width as the instruction `Q` field
+ /// (.double = 64-bit, .quad = 128-bit).
+ pub fn size(arrangement: Arrangement) Instruction.DataProcessingVector.Q {
+ return switch (arrangement) {
+ .@"2d", .@"4s", .@"8h", .@"16b" => .quad,
+ .@"1d", .@"2s", .@"4h", .@"8b" => .double,
+ };
+ }
+
+ /// Per-lane element size of the arrangement.
+ pub fn elemSize(arrangement: Arrangement) Instruction.DataProcessingVector.Size {
+ return switch (arrangement) {
+ .@"2d", .@"1d" => .double,
+ .@"4s", .@"2s" => .single,
+ .@"8h", .@"4h" => .half,
+ .@"16b", .@"8b" => .byte,
+ };
+ }
+ };
+
+ // 64-bit ("x") integer views of the general-purpose registers.
+ pub const x0: Register = .{ .alias = .r0, .format = .{ .integer = .doubleword } };
+ pub const x1: Register = .{ .alias = .r1, .format = .{ .integer = .doubleword } };
+ pub const x2: Register = .{ .alias = .r2, .format = .{ .integer = .doubleword } };
+ pub const x3: Register = .{ .alias = .r3, .format = .{ .integer = .doubleword } };
+ pub const x4: Register = .{ .alias = .r4, .format = .{ .integer = .doubleword } };
+ pub const x5: Register = .{ .alias = .r5, .format = .{ .integer = .doubleword } };
+ pub const x6: Register = .{ .alias = .r6, .format = .{ .integer = .doubleword } };
+ pub const x7: Register = .{ .alias = .r7, .format = .{ .integer = .doubleword } };
+ pub const x8: Register = .{ .alias = .r8, .format = .{ .integer = .doubleword } };
+ pub const x9: Register = .{ .alias = .r9, .format = .{ .integer = .doubleword } };
+ pub const x10: Register = .{ .alias = .r10, .format = .{ .integer = .doubleword } };
+ pub const x11: Register = .{ .alias = .r11, .format = .{ .integer = .doubleword } };
+ pub const x12: Register = .{ .alias = .r12, .format = .{ .integer = .doubleword } };
+ pub const x13: Register = .{ .alias = .r13, .format = .{ .integer = .doubleword } };
+ pub const x14: Register = .{ .alias = .r14, .format = .{ .integer = .doubleword } };
+ pub const x15: Register = .{ .alias = .r15, .format = .{ .integer = .doubleword } };
+ pub const x16: Register = .{ .alias = .r16, .format = .{ .integer = .doubleword } };
+ pub const x17: Register = .{ .alias = .r17, .format = .{ .integer = .doubleword } };
+ pub const x18: Register = .{ .alias = .r18, .format = .{ .integer = .doubleword } };
+ pub const x19: Register = .{ .alias = .r19, .format = .{ .integer = .doubleword } };
+ pub const x20: Register = .{ .alias = .r20, .format = .{ .integer = .doubleword } };
+ pub const x21: Register = .{ .alias = .r21, .format = .{ .integer = .doubleword } };
+ pub const x22: Register = .{ .alias = .r22, .format = .{ .integer = .doubleword } };
+ pub const x23: Register = .{ .alias = .r23, .format = .{ .integer = .doubleword } };
+ pub const x24: Register = .{ .alias = .r24, .format = .{ .integer = .doubleword } };
+ pub const x25: Register = .{ .alias = .r25, .format = .{ .integer = .doubleword } };
+ pub const x26: Register = .{ .alias = .r26, .format = .{ .integer = .doubleword } };
+ pub const x27: Register = .{ .alias = .r27, .format = .{ .integer = .doubleword } };
+ pub const x28: Register = .{ .alias = .r28, .format = .{ .integer = .doubleword } };
+ pub const x29: Register = .{ .alias = .r29, .format = .{ .integer = .doubleword } };
+ pub const x30: Register = .{ .alias = .r30, .format = .{ .integer = .doubleword } };
+ pub const xzr: Register = .{ .alias = .zr, .format = .{ .integer = .doubleword } };
+ pub const sp: Register = .{ .alias = .sp, .format = .{ .integer = .doubleword } };
+
+ // 32-bit ("w") integer views of the general-purpose registers.
+ pub const w0: Register = .{ .alias = .r0, .format = .{ .integer = .word } };
+ pub const w1: Register = .{ .alias = .r1, .format = .{ .integer = .word } };
+ pub const w2: Register = .{ .alias = .r2, .format = .{ .integer = .word } };
+ pub const w3: Register = .{ .alias = .r3, .format = .{ .integer = .word } };
+ pub const w4: Register = .{ .alias = .r4, .format = .{ .integer = .word } };
+ pub const w5: Register = .{ .alias = .r5, .format = .{ .integer = .word } };
+ pub const w6: Register = .{ .alias = .r6, .format = .{ .integer = .word } };
+ pub const w7: Register = .{ .alias = .r7, .format = .{ .integer = .word } };
+ pub const w8: Register = .{ .alias = .r8, .format = .{ .integer = .word } };
+ pub const w9: Register = .{ .alias = .r9, .format = .{ .integer = .word } };
+ pub const w10: Register = .{ .alias = .r10, .format = .{ .integer = .word } };
+ pub const w11: Register = .{ .alias = .r11, .format = .{ .integer = .word } };
+ pub const w12: Register = .{ .alias = .r12, .format = .{ .integer = .word } };
+ pub const w13: Register = .{ .alias = .r13, .format = .{ .integer = .word } };
+ pub const w14: Register = .{ .alias = .r14, .format = .{ .integer = .word } };
+ pub const w15: Register = .{ .alias = .r15, .format = .{ .integer = .word } };
+ pub const w16: Register = .{ .alias = .r16, .format = .{ .integer = .word } };
+ pub const w17: Register = .{ .alias = .r17, .format = .{ .integer = .word } };
+ pub const w18: Register = .{ .alias = .r18, .format = .{ .integer = .word } };
+ pub const w19: Register = .{ .alias = .r19, .format = .{ .integer = .word } };
+ pub const w20: Register = .{ .alias = .r20, .format = .{ .integer = .word } };
+ pub const w21: Register = .{ .alias = .r21, .format = .{ .integer = .word } };
+ pub const w22: Register = .{ .alias = .r22, .format = .{ .integer = .word } };
+ pub const w23: Register = .{ .alias = .r23, .format = .{ .integer = .word } };
+ pub const w24: Register = .{ .alias = .r24, .format = .{ .integer = .word } };
+ pub const w25: Register = .{ .alias = .r25, .format = .{ .integer = .word } };
+ pub const w26: Register = .{ .alias = .r26, .format = .{ .integer = .word } };
+ pub const w27: Register = .{ .alias = .r27, .format = .{ .integer = .word } };
+ pub const w28: Register = .{ .alias = .r28, .format = .{ .integer = .word } };
+ pub const w29: Register = .{ .alias = .r29, .format = .{ .integer = .word } };
+ pub const w30: Register = .{ .alias = .r30, .format = .{ .integer = .word } };
+ pub const wzr: Register = .{ .alias = .zr, .format = .{ .integer = .word } };
+ pub const wsp: Register = .{ .alias = .sp, .format = .{ .integer = .word } };
+
+ // Conventional role aliases: intra-procedure-call scratch registers,
+ // frame pointer, and link register; `pc` is modeled as a doubleword
+ // integer register.
+ pub const ip0 = x16;
+ pub const ip1 = x17;
+ pub const fp = x29;
+ pub const lr = x30;
+ pub const pc: Register = .{ .alias = .pc, .format = .{ .integer = .doubleword } };
+
+ // 128-bit ("q") scalar views of the SIMD&FP registers.
+ pub const q0: Register = .{ .alias = .v0, .format = .{ .scalar = .quad } };
+ pub const q1: Register = .{ .alias = .v1, .format = .{ .scalar = .quad } };
+ pub const q2: Register = .{ .alias = .v2, .format = .{ .scalar = .quad } };
+ pub const q3: Register = .{ .alias = .v3, .format = .{ .scalar = .quad } };
+ pub const q4: Register = .{ .alias = .v4, .format = .{ .scalar = .quad } };
+ pub const q5: Register = .{ .alias = .v5, .format = .{ .scalar = .quad } };
+ pub const q6: Register = .{ .alias = .v6, .format = .{ .scalar = .quad } };
+ pub const q7: Register = .{ .alias = .v7, .format = .{ .scalar = .quad } };
+ pub const q8: Register = .{ .alias = .v8, .format = .{ .scalar = .quad } };
+ pub const q9: Register = .{ .alias = .v9, .format = .{ .scalar = .quad } };
+ pub const q10: Register = .{ .alias = .v10, .format = .{ .scalar = .quad } };
+ pub const q11: Register = .{ .alias = .v11, .format = .{ .scalar = .quad } };
+ pub const q12: Register = .{ .alias = .v12, .format = .{ .scalar = .quad } };
+ pub const q13: Register = .{ .alias = .v13, .format = .{ .scalar = .quad } };
+ pub const q14: Register = .{ .alias = .v14, .format = .{ .scalar = .quad } };
+ pub const q15: Register = .{ .alias = .v15, .format = .{ .scalar = .quad } };
+ pub const q16: Register = .{ .alias = .v16, .format = .{ .scalar = .quad } };
+ pub const q17: Register = .{ .alias = .v17, .format = .{ .scalar = .quad } };
+ pub const q18: Register = .{ .alias = .v18, .format = .{ .scalar = .quad } };
+ pub const q19: Register = .{ .alias = .v19, .format = .{ .scalar = .quad } };
+ pub const q20: Register = .{ .alias = .v20, .format = .{ .scalar = .quad } };
+ pub const q21: Register = .{ .alias = .v21, .format = .{ .scalar = .quad } };
+ pub const q22: Register = .{ .alias = .v22, .format = .{ .scalar = .quad } };
+ pub const q23: Register = .{ .alias = .v23, .format = .{ .scalar = .quad } };
+ pub const q24: Register = .{ .alias = .v24, .format = .{ .scalar = .quad } };
+ pub const q25: Register = .{ .alias = .v25, .format = .{ .scalar = .quad } };
+ pub const q26: Register = .{ .alias = .v26, .format = .{ .scalar = .quad } };
+ pub const q27: Register = .{ .alias = .v27, .format = .{ .scalar = .quad } };
+ pub const q28: Register = .{ .alias = .v28, .format = .{ .scalar = .quad } };
+ pub const q29: Register = .{ .alias = .v29, .format = .{ .scalar = .quad } };
+ pub const q30: Register = .{ .alias = .v30, .format = .{ .scalar = .quad } };
+ pub const q31: Register = .{ .alias = .v31, .format = .{ .scalar = .quad } };
+
+ // 64-bit ("d") scalar views of the SIMD&FP registers.
+ pub const d0: Register = .{ .alias = .v0, .format = .{ .scalar = .double } };
+ pub const d1: Register = .{ .alias = .v1, .format = .{ .scalar = .double } };
+ pub const d2: Register = .{ .alias = .v2, .format = .{ .scalar = .double } };
+ pub const d3: Register = .{ .alias = .v3, .format = .{ .scalar = .double } };
+ pub const d4: Register = .{ .alias = .v4, .format = .{ .scalar = .double } };
+ pub const d5: Register = .{ .alias = .v5, .format = .{ .scalar = .double } };
+ pub const d6: Register = .{ .alias = .v6, .format = .{ .scalar = .double } };
+ pub const d7: Register = .{ .alias = .v7, .format = .{ .scalar = .double } };
+ pub const d8: Register = .{ .alias = .v8, .format = .{ .scalar = .double } };
+ pub const d9: Register = .{ .alias = .v9, .format = .{ .scalar = .double } };
+ pub const d10: Register = .{ .alias = .v10, .format = .{ .scalar = .double } };
+ pub const d11: Register = .{ .alias = .v11, .format = .{ .scalar = .double } };
+ pub const d12: Register = .{ .alias = .v12, .format = .{ .scalar = .double } };
+ pub const d13: Register = .{ .alias = .v13, .format = .{ .scalar = .double } };
+ pub const d14: Register = .{ .alias = .v14, .format = .{ .scalar = .double } };
+ pub const d15: Register = .{ .alias = .v15, .format = .{ .scalar = .double } };
+ pub const d16: Register = .{ .alias = .v16, .format = .{ .scalar = .double } };
+ pub const d17: Register = .{ .alias = .v17, .format = .{ .scalar = .double } };
+ pub const d18: Register = .{ .alias = .v18, .format = .{ .scalar = .double } };
+ pub const d19: Register = .{ .alias = .v19, .format = .{ .scalar = .double } };
+ pub const d20: Register = .{ .alias = .v20, .format = .{ .scalar = .double } };
+ pub const d21: Register = .{ .alias = .v21, .format = .{ .scalar = .double } };
+ pub const d22: Register = .{ .alias = .v22, .format = .{ .scalar = .double } };
+ pub const d23: Register = .{ .alias = .v23, .format = .{ .scalar = .double } };
+ pub const d24: Register = .{ .alias = .v24, .format = .{ .scalar = .double } };
+ pub const d25: Register = .{ .alias = .v25, .format = .{ .scalar = .double } };
+ pub const d26: Register = .{ .alias = .v26, .format = .{ .scalar = .double } };
+ pub const d27: Register = .{ .alias = .v27, .format = .{ .scalar = .double } };
+ pub const d28: Register = .{ .alias = .v28, .format = .{ .scalar = .double } };
+ pub const d29: Register = .{ .alias = .v29, .format = .{ .scalar = .double } };
+ pub const d30: Register = .{ .alias = .v30, .format = .{ .scalar = .double } };
+ pub const d31: Register = .{ .alias = .v31, .format = .{ .scalar = .double } };
+
+ // 32-bit ("s") scalar views of the SIMD&FP registers.
+ pub const s0: Register = .{ .alias = .v0, .format = .{ .scalar = .single } };
+ pub const s1: Register = .{ .alias = .v1, .format = .{ .scalar = .single } };
+ pub const s2: Register = .{ .alias = .v2, .format = .{ .scalar = .single } };
+ pub const s3: Register = .{ .alias = .v3, .format = .{ .scalar = .single } };
+ pub const s4: Register = .{ .alias = .v4, .format = .{ .scalar = .single } };
+ pub const s5: Register = .{ .alias = .v5, .format = .{ .scalar = .single } };
+ pub const s6: Register = .{ .alias = .v6, .format = .{ .scalar = .single } };
+ pub const s7: Register = .{ .alias = .v7, .format = .{ .scalar = .single } };
+ pub const s8: Register = .{ .alias = .v8, .format = .{ .scalar = .single } };
+ pub const s9: Register = .{ .alias = .v9, .format = .{ .scalar = .single } };
+ pub const s10: Register = .{ .alias = .v10, .format = .{ .scalar = .single } };
+ pub const s11: Register = .{ .alias = .v11, .format = .{ .scalar = .single } };
+ pub const s12: Register = .{ .alias = .v12, .format = .{ .scalar = .single } };
+ pub const s13: Register = .{ .alias = .v13, .format = .{ .scalar = .single } };
+ pub const s14: Register = .{ .alias = .v14, .format = .{ .scalar = .single } };
+ pub const s15: Register = .{ .alias = .v15, .format = .{ .scalar = .single } };
+ pub const s16: Register = .{ .alias = .v16, .format = .{ .scalar = .single } };
+ pub const s17: Register = .{ .alias = .v17, .format = .{ .scalar = .single } };
+ pub const s18: Register = .{ .alias = .v18, .format = .{ .scalar = .single } };
+ pub const s19: Register = .{ .alias = .v19, .format = .{ .scalar = .single } };
+ pub const s20: Register = .{ .alias = .v20, .format = .{ .scalar = .single } };
+ pub const s21: Register = .{ .alias = .v21, .format = .{ .scalar = .single } };
+ pub const s22: Register = .{ .alias = .v22, .format = .{ .scalar = .single } };
+ pub const s23: Register = .{ .alias = .v23, .format = .{ .scalar = .single } };
+ pub const s24: Register = .{ .alias = .v24, .format = .{ .scalar = .single } };
+ pub const s25: Register = .{ .alias = .v25, .format = .{ .scalar = .single } };
+ pub const s26: Register = .{ .alias = .v26, .format = .{ .scalar = .single } };
+ pub const s27: Register = .{ .alias = .v27, .format = .{ .scalar = .single } };
+ pub const s28: Register = .{ .alias = .v28, .format = .{ .scalar = .single } };
+ pub const s29: Register = .{ .alias = .v29, .format = .{ .scalar = .single } };
+ pub const s30: Register = .{ .alias = .v30, .format = .{ .scalar = .single } };
+ pub const s31: Register = .{ .alias = .v31, .format = .{ .scalar = .single } };
+
+ // 16-bit ("h") scalar views of the SIMD&FP registers.
+ pub const h0: Register = .{ .alias = .v0, .format = .{ .scalar = .half } };
+ pub const h1: Register = .{ .alias = .v1, .format = .{ .scalar = .half } };
+ pub const h2: Register = .{ .alias = .v2, .format = .{ .scalar = .half } };
+ pub const h3: Register = .{ .alias = .v3, .format = .{ .scalar = .half } };
+ pub const h4: Register = .{ .alias = .v4, .format = .{ .scalar = .half } };
+ pub const h5: Register = .{ .alias = .v5, .format = .{ .scalar = .half } };
+ pub const h6: Register = .{ .alias = .v6, .format = .{ .scalar = .half } };
+ pub const h7: Register = .{ .alias = .v7, .format = .{ .scalar = .half } };
+ pub const h8: Register = .{ .alias = .v8, .format = .{ .scalar = .half } };
+ pub const h9: Register = .{ .alias = .v9, .format = .{ .scalar = .half } };
+ pub const h10: Register = .{ .alias = .v10, .format = .{ .scalar = .half } };
+ pub const h11: Register = .{ .alias = .v11, .format = .{ .scalar = .half } };
+ pub const h12: Register = .{ .alias = .v12, .format = .{ .scalar = .half } };
+ pub const h13: Register = .{ .alias = .v13, .format = .{ .scalar = .half } };
+ pub const h14: Register = .{ .alias = .v14, .format = .{ .scalar = .half } };
+ pub const h15: Register = .{ .alias = .v15, .format = .{ .scalar = .half } };
+ pub const h16: Register = .{ .alias = .v16, .format = .{ .scalar = .half } };
+ pub const h17: Register = .{ .alias = .v17, .format = .{ .scalar = .half } };
+ pub const h18: Register = .{ .alias = .v18, .format = .{ .scalar = .half } };
+ pub const h19: Register = .{ .alias = .v19, .format = .{ .scalar = .half } };
+ pub const h20: Register = .{ .alias = .v20, .format = .{ .scalar = .half } };
+ pub const h21: Register = .{ .alias = .v21, .format = .{ .scalar = .half } };
+ pub const h22: Register = .{ .alias = .v22, .format = .{ .scalar = .half } };
+ pub const h23: Register = .{ .alias = .v23, .format = .{ .scalar = .half } };
+ pub const h24: Register = .{ .alias = .v24, .format = .{ .scalar = .half } };
+ pub const h25: Register = .{ .alias = .v25, .format = .{ .scalar = .half } };
+ pub const h26: Register = .{ .alias = .v26, .format = .{ .scalar = .half } };
+ pub const h27: Register = .{ .alias = .v27, .format = .{ .scalar = .half } };
+ pub const h28: Register = .{ .alias = .v28, .format = .{ .scalar = .half } };
+ pub const h29: Register = .{ .alias = .v29, .format = .{ .scalar = .half } };
+ pub const h30: Register = .{ .alias = .v30, .format = .{ .scalar = .half } };
+ pub const h31: Register = .{ .alias = .v31, .format = .{ .scalar = .half } };
+
+ pub const b0: Register = .{ .alias = .v0, .format = .{ .scalar = .byte } };
+ pub const b1: Register = .{ .alias = .v1, .format = .{ .scalar = .byte } };
+ pub const b2: Register = .{ .alias = .v2, .format = .{ .scalar = .byte } };
+ pub const b3: Register = .{ .alias = .v3, .format = .{ .scalar = .byte } };
+ pub const b4: Register = .{ .alias = .v4, .format = .{ .scalar = .byte } };
+ pub const b5: Register = .{ .alias = .v5, .format = .{ .scalar = .byte } };
+ pub const b6: Register = .{ .alias = .v6, .format = .{ .scalar = .byte } };
+ pub const b7: Register = .{ .alias = .v7, .format = .{ .scalar = .byte } };
+ pub const b8: Register = .{ .alias = .v8, .format = .{ .scalar = .byte } };
+ pub const b9: Register = .{ .alias = .v9, .format = .{ .scalar = .byte } };
+ pub const b10: Register = .{ .alias = .v10, .format = .{ .scalar = .byte } };
+ pub const b11: Register = .{ .alias = .v11, .format = .{ .scalar = .byte } };
+ pub const b12: Register = .{ .alias = .v12, .format = .{ .scalar = .byte } };
+ pub const b13: Register = .{ .alias = .v13, .format = .{ .scalar = .byte } };
+ pub const b14: Register = .{ .alias = .v14, .format = .{ .scalar = .byte } };
+ pub const b15: Register = .{ .alias = .v15, .format = .{ .scalar = .byte } };
+ pub const b16: Register = .{ .alias = .v16, .format = .{ .scalar = .byte } };
+ pub const b17: Register = .{ .alias = .v17, .format = .{ .scalar = .byte } };
+ pub const b18: Register = .{ .alias = .v18, .format = .{ .scalar = .byte } };
+ pub const b19: Register = .{ .alias = .v19, .format = .{ .scalar = .byte } };
+ pub const b20: Register = .{ .alias = .v20, .format = .{ .scalar = .byte } };
+ pub const b21: Register = .{ .alias = .v21, .format = .{ .scalar = .byte } };
+ pub const b22: Register = .{ .alias = .v22, .format = .{ .scalar = .byte } };
+ pub const b23: Register = .{ .alias = .v23, .format = .{ .scalar = .byte } };
+ pub const b24: Register = .{ .alias = .v24, .format = .{ .scalar = .byte } };
+ pub const b25: Register = .{ .alias = .v25, .format = .{ .scalar = .byte } };
+ pub const b26: Register = .{ .alias = .v26, .format = .{ .scalar = .byte } };
+ pub const b27: Register = .{ .alias = .v27, .format = .{ .scalar = .byte } };
+ pub const b28: Register = .{ .alias = .v28, .format = .{ .scalar = .byte } };
+ pub const b29: Register = .{ .alias = .v29, .format = .{ .scalar = .byte } };
+ pub const b30: Register = .{ .alias = .v30, .format = .{ .scalar = .byte } };
+ pub const b31: Register = .{ .alias = .v31, .format = .{ .scalar = .byte } };
+
+ pub const fpcr: Register = .{ .alias = .fpcr, .format = .{ .integer = .doubleword } };
+ pub const fpsr: Register = .{ .alias = .fpsr, .format = .{ .integer = .doubleword } };
+
+ // Scalable (Z) views; note they share the same aliases as the V registers.
+ pub const z0: Register = .{ .alias = .v0, .format = .{ .scalar = .scalable } };
+ pub const z1: Register = .{ .alias = .v1, .format = .{ .scalar = .scalable } };
+ pub const z2: Register = .{ .alias = .v2, .format = .{ .scalar = .scalable } };
+ pub const z3: Register = .{ .alias = .v3, .format = .{ .scalar = .scalable } };
+ pub const z4: Register = .{ .alias = .v4, .format = .{ .scalar = .scalable } };
+ pub const z5: Register = .{ .alias = .v5, .format = .{ .scalar = .scalable } };
+ pub const z6: Register = .{ .alias = .v6, .format = .{ .scalar = .scalable } };
+ pub const z7: Register = .{ .alias = .v7, .format = .{ .scalar = .scalable } };
+ pub const z8: Register = .{ .alias = .v8, .format = .{ .scalar = .scalable } };
+ pub const z9: Register = .{ .alias = .v9, .format = .{ .scalar = .scalable } };
+ pub const z10: Register = .{ .alias = .v10, .format = .{ .scalar = .scalable } };
+ pub const z11: Register = .{ .alias = .v11, .format = .{ .scalar = .scalable } };
+ pub const z12: Register = .{ .alias = .v12, .format = .{ .scalar = .scalable } };
+ pub const z13: Register = .{ .alias = .v13, .format = .{ .scalar = .scalable } };
+ pub const z14: Register = .{ .alias = .v14, .format = .{ .scalar = .scalable } };
+ pub const z15: Register = .{ .alias = .v15, .format = .{ .scalar = .scalable } };
+ pub const z16: Register = .{ .alias = .v16, .format = .{ .scalar = .scalable } };
+ pub const z17: Register = .{ .alias = .v17, .format = .{ .scalar = .scalable } };
+ pub const z18: Register = .{ .alias = .v18, .format = .{ .scalar = .scalable } };
+ pub const z19: Register = .{ .alias = .v19, .format = .{ .scalar = .scalable } };
+ pub const z20: Register = .{ .alias = .v20, .format = .{ .scalar = .scalable } };
+ pub const z21: Register = .{ .alias = .v21, .format = .{ .scalar = .scalable } };
+ pub const z22: Register = .{ .alias = .v22, .format = .{ .scalar = .scalable } };
+ pub const z23: Register = .{ .alias = .v23, .format = .{ .scalar = .scalable } };
+ pub const z24: Register = .{ .alias = .v24, .format = .{ .scalar = .scalable } };
+ pub const z25: Register = .{ .alias = .v25, .format = .{ .scalar = .scalable } };
+ pub const z26: Register = .{ .alias = .v26, .format = .{ .scalar = .scalable } };
+ pub const z27: Register = .{ .alias = .v27, .format = .{ .scalar = .scalable } };
+ pub const z28: Register = .{ .alias = .v28, .format = .{ .scalar = .scalable } };
+ pub const z29: Register = .{ .alias = .v29, .format = .{ .scalar = .scalable } };
+ pub const z30: Register = .{ .alias = .v30, .format = .{ .scalar = .scalable } };
+ pub const z31: Register = .{ .alias = .v31, .format = .{ .scalar = .scalable } };
+
+ // Predicate (P) registers. They use their own dedicated alias tags rather than
+ // aliasing v0-v15: the Alias enum declares p0-p15 separately and Alias.p()
+ // constructs predicate registers from that range, so these constants must agree
+ // with it (previously they incorrectly aliased .v0-.v15, making Register.p0
+ // compare unequal to Alias.p0.p()).
+ pub const p0: Register = .{ .alias = .p0, .format = .{ .scalar = .predicate } };
+ pub const p1: Register = .{ .alias = .p1, .format = .{ .scalar = .predicate } };
+ pub const p2: Register = .{ .alias = .p2, .format = .{ .scalar = .predicate } };
+ pub const p3: Register = .{ .alias = .p3, .format = .{ .scalar = .predicate } };
+ pub const p4: Register = .{ .alias = .p4, .format = .{ .scalar = .predicate } };
+ pub const p5: Register = .{ .alias = .p5, .format = .{ .scalar = .predicate } };
+ pub const p6: Register = .{ .alias = .p6, .format = .{ .scalar = .predicate } };
+ pub const p7: Register = .{ .alias = .p7, .format = .{ .scalar = .predicate } };
+ pub const p8: Register = .{ .alias = .p8, .format = .{ .scalar = .predicate } };
+ pub const p9: Register = .{ .alias = .p9, .format = .{ .scalar = .predicate } };
+ pub const p10: Register = .{ .alias = .p10, .format = .{ .scalar = .predicate } };
+ pub const p11: Register = .{ .alias = .p11, .format = .{ .scalar = .predicate } };
+ pub const p12: Register = .{ .alias = .p12, .format = .{ .scalar = .predicate } };
+ pub const p13: Register = .{ .alias = .p13, .format = .{ .scalar = .predicate } };
+ pub const p14: Register = .{ .alias = .p14, .format = .{ .scalar = .predicate } };
+ pub const p15: Register = .{ .alias = .p15, .format = .{ .scalar = .predicate } };
+
+ // FFR; modeled with a 64-bit integer format like fpcr/fpsr.
+ pub const ffr: Register = .{ .alias = .ffr, .format = .{ .integer = .doubleword } };
+
+ /// A raw 5-bit register-number field as it appears in an instruction word.
+ pub const Encoded = enum(u5) {
+ _,
+
+ /// Decodes a 5-bit field as a general-purpose register of the given width.
+ /// Number 31 decodes to SP only when `opts.sp` is set; otherwise it is ZR.
+ pub fn decodeInteger(enc: Encoded, sf_enc: IntegerSize, opts: struct { sp: bool = false }) Register {
+ return switch (sf_enc) {
+ .word => switch (@intFromEnum(enc)) {
+ 0 => .w0,
+ 1 => .w1,
+ 2 => .w2,
+ 3 => .w3,
+ 4 => .w4,
+ 5 => .w5,
+ 6 => .w6,
+ 7 => .w7,
+ 8 => .w8,
+ 9 => .w9,
+ 10 => .w10,
+ 11 => .w11,
+ 12 => .w12,
+ 13 => .w13,
+ 14 => .w14,
+ 15 => .w15,
+ 16 => .w16,
+ 17 => .w17,
+ 18 => .w18,
+ 19 => .w19,
+ 20 => .w20,
+ 21 => .w21,
+ 22 => .w22,
+ 23 => .w23,
+ 24 => .w24,
+ 25 => .w25,
+ 26 => .w26,
+ 27 => .w27,
+ 28 => .w28,
+ 29 => .w29,
+ 30 => .w30,
+ 31 => if (opts.sp) .wsp else .wzr,
+ },
+ .doubleword => switch (@intFromEnum(enc)) {
+ 0 => .x0,
+ 1 => .x1,
+ 2 => .x2,
+ 3 => .x3,
+ 4 => .x4,
+ 5 => .x5,
+ 6 => .x6,
+ 7 => .x7,
+ 8 => .x8,
+ 9 => .x9,
+ 10 => .x10,
+ 11 => .x11,
+ 12 => .x12,
+ 13 => .x13,
+ 14 => .x14,
+ 15 => .x15,
+ 16 => .x16,
+ 17 => .x17,
+ 18 => .x18,
+ 19 => .x19,
+ 20 => .x20,
+ 21 => .x21,
+ 22 => .x22,
+ 23 => .x23,
+ 24 => .x24,
+ 25 => .x25,
+ 26 => .x26,
+ 27 => .x27,
+ 28 => .x28,
+ 29 => .x29,
+ 30 => .x30,
+ 31 => if (opts.sp) .sp else .xzr,
+ },
+ };
+ }
+
+ /// Decodes a 5-bit field as a SIMD&FP/SVE register with the given size.
+ /// The `.predicate` case narrows to u4 first (asserts enc < 16 in safe modes),
+ /// since predicate register numbers only span p0-p15.
+ pub fn decodeVector(enc: Encoded, vs_enc: VectorSize) Register {
+ return switch (vs_enc) {
+ .byte => switch (@intFromEnum(enc)) {
+ 0 => .b0,
+ 1 => .b1,
+ 2 => .b2,
+ 3 => .b3,
+ 4 => .b4,
+ 5 => .b5,
+ 6 => .b6,
+ 7 => .b7,
+ 8 => .b8,
+ 9 => .b9,
+ 10 => .b10,
+ 11 => .b11,
+ 12 => .b12,
+ 13 => .b13,
+ 14 => .b14,
+ 15 => .b15,
+ 16 => .b16,
+ 17 => .b17,
+ 18 => .b18,
+ 19 => .b19,
+ 20 => .b20,
+ 21 => .b21,
+ 22 => .b22,
+ 23 => .b23,
+ 24 => .b24,
+ 25 => .b25,
+ 26 => .b26,
+ 27 => .b27,
+ 28 => .b28,
+ 29 => .b29,
+ 30 => .b30,
+ 31 => .b31,
+ },
+ .half => switch (@intFromEnum(enc)) {
+ 0 => .h0,
+ 1 => .h1,
+ 2 => .h2,
+ 3 => .h3,
+ 4 => .h4,
+ 5 => .h5,
+ 6 => .h6,
+ 7 => .h7,
+ 8 => .h8,
+ 9 => .h9,
+ 10 => .h10,
+ 11 => .h11,
+ 12 => .h12,
+ 13 => .h13,
+ 14 => .h14,
+ 15 => .h15,
+ 16 => .h16,
+ 17 => .h17,
+ 18 => .h18,
+ 19 => .h19,
+ 20 => .h20,
+ 21 => .h21,
+ 22 => .h22,
+ 23 => .h23,
+ 24 => .h24,
+ 25 => .h25,
+ 26 => .h26,
+ 27 => .h27,
+ 28 => .h28,
+ 29 => .h29,
+ 30 => .h30,
+ 31 => .h31,
+ },
+ .single => switch (@intFromEnum(enc)) {
+ 0 => .s0,
+ 1 => .s1,
+ 2 => .s2,
+ 3 => .s3,
+ 4 => .s4,
+ 5 => .s5,
+ 6 => .s6,
+ 7 => .s7,
+ 8 => .s8,
+ 9 => .s9,
+ 10 => .s10,
+ 11 => .s11,
+ 12 => .s12,
+ 13 => .s13,
+ 14 => .s14,
+ 15 => .s15,
+ 16 => .s16,
+ 17 => .s17,
+ 18 => .s18,
+ 19 => .s19,
+ 20 => .s20,
+ 21 => .s21,
+ 22 => .s22,
+ 23 => .s23,
+ 24 => .s24,
+ 25 => .s25,
+ 26 => .s26,
+ 27 => .s27,
+ 28 => .s28,
+ 29 => .s29,
+ 30 => .s30,
+ 31 => .s31,
+ },
+ .double => switch (@intFromEnum(enc)) {
+ 0 => .d0,
+ 1 => .d1,
+ 2 => .d2,
+ 3 => .d3,
+ 4 => .d4,
+ 5 => .d5,
+ 6 => .d6,
+ 7 => .d7,
+ 8 => .d8,
+ 9 => .d9,
+ 10 => .d10,
+ 11 => .d11,
+ 12 => .d12,
+ 13 => .d13,
+ 14 => .d14,
+ 15 => .d15,
+ 16 => .d16,
+ 17 => .d17,
+ 18 => .d18,
+ 19 => .d19,
+ 20 => .d20,
+ 21 => .d21,
+ 22 => .d22,
+ 23 => .d23,
+ 24 => .d24,
+ 25 => .d25,
+ 26 => .d26,
+ 27 => .d27,
+ 28 => .d28,
+ 29 => .d29,
+ 30 => .d30,
+ 31 => .d31,
+ },
+ .quad => switch (@intFromEnum(enc)) {
+ 0 => .q0,
+ 1 => .q1,
+ 2 => .q2,
+ 3 => .q3,
+ 4 => .q4,
+ 5 => .q5,
+ 6 => .q6,
+ 7 => .q7,
+ 8 => .q8,
+ 9 => .q9,
+ 10 => .q10,
+ 11 => .q11,
+ 12 => .q12,
+ 13 => .q13,
+ 14 => .q14,
+ 15 => .q15,
+ 16 => .q16,
+ 17 => .q17,
+ 18 => .q18,
+ 19 => .q19,
+ 20 => .q20,
+ 21 => .q21,
+ 22 => .q22,
+ 23 => .q23,
+ 24 => .q24,
+ 25 => .q25,
+ 26 => .q26,
+ 27 => .q27,
+ 28 => .q28,
+ 29 => .q29,
+ 30 => .q30,
+ 31 => .q31,
+ },
+ .scalable => switch (@intFromEnum(enc)) {
+ 0 => .z0,
+ 1 => .z1,
+ 2 => .z2,
+ 3 => .z3,
+ 4 => .z4,
+ 5 => .z5,
+ 6 => .z6,
+ 7 => .z7,
+ 8 => .z8,
+ 9 => .z9,
+ 10 => .z10,
+ 11 => .z11,
+ 12 => .z12,
+ 13 => .z13,
+ 14 => .z14,
+ 15 => .z15,
+ 16 => .z16,
+ 17 => .z17,
+ 18 => .z18,
+ 19 => .z19,
+ 20 => .z20,
+ 21 => .z21,
+ 22 => .z22,
+ 23 => .z23,
+ 24 => .z24,
+ 25 => .z25,
+ 26 => .z26,
+ 27 => .z27,
+ 28 => .z28,
+ 29 => .z29,
+ 30 => .z30,
+ 31 => .z31,
+ },
+ .predicate => switch (@as(u4, @intCast(@intFromEnum(enc)))) {
+ 0 => .p0,
+ 1 => .p1,
+ 2 => .p2,
+ 3 => .p3,
+ 4 => .p4,
+ 5 => .p5,
+ 6 => .p6,
+ 7 => .p7,
+ 8 => .p8,
+ 9 => .p9,
+ 10 => .p10,
+ 11 => .p11,
+ 12 => .p12,
+ 13 => .p13,
+ 14 => .p14,
+ 15 => .p15,
+ },
+ };
+ }
+ };
+
+ /// One tag per set of aliasing registers.
+ pub const Alias = enum(u7) {
+ // General-purpose registers r0-r30, plus the two names of number 31.
+ r0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ zr,
+ sp,
+
+ pc,
+
+ v0,
+ v1,
+ v2,
+ v3,
+ v4,
+ v5,
+ v6,
+ v7,
+ v8,
+ v9,
+ v10,
+ v11,
+ v12,
+ v13,
+ v14,
+ v15,
+ v16,
+ v17,
+ v18,
+ v19,
+ v20,
+ v21,
+ v22,
+ v23,
+ v24,
+ v25,
+ v26,
+ v27,
+ v28,
+ v29,
+ v30,
+ v31,
+
+ fpcr,
+ fpsr,
+
+ p0,
+ p1,
+ p2,
+ p3,
+ p4,
+ p5,
+ p6,
+ p7,
+ p8,
+ p9,
+ p10,
+ p11,
+ p12,
+ p13,
+ p14,
+ p15,
+
+ ffr,
+
+ // Conventional names for procedure-call-standard roles.
+ pub const ip0: Alias = .r16;
+ pub const ip1: Alias = .r17;
+ pub const fp: Alias = .r29;
+ pub const lr: Alias = .r30;
+
+ // The constructors below each assert that the alias belongs to the register
+ // class being requested, then pair it with the corresponding format.
+ // Note r() accepts any GPR-class alias including zr/sp/pc and keeps the
+ // size-less .alias format.
+ pub fn r(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.r0) and @intFromEnum(ra) <= @intFromEnum(Alias.pc));
+ return .{ .alias = ra, .format = .alias };
+ }
+ pub fn x(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.r0) and @intFromEnum(ra) <= @intFromEnum(Alias.sp));
+ return .{ .alias = ra, .format = .{ .integer = .doubleword } };
+ }
+ pub fn w(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.r0) and @intFromEnum(ra) <= @intFromEnum(Alias.sp));
+ return .{ .alias = ra, .format = .{ .integer = .word } };
+ }
+ pub fn v(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .alias };
+ }
+ pub fn q(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .scalar = .quad } };
+ }
+ pub fn d(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .scalar = .double } };
+ }
+ pub fn s(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .scalar = .single } };
+ }
+ pub fn h(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .scalar = .half } };
+ }
+ pub fn b(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .scalar = .byte } };
+ }
+ pub fn z(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .scalar = .scalable } };
+ }
+ pub fn p(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.p0) and @intFromEnum(ra) <= @intFromEnum(Alias.p15));
+ return .{ .alias = ra, .format = .{ .scalar = .predicate } };
+ }
+ // Vector-arrangement constructors (element size x lane count).
+ pub fn @"2d"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"2d" } };
+ }
+ pub fn @"4s"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"4s" } };
+ }
+ pub fn @"8h"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"8h" } };
+ }
+ pub fn @"16b"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"16b" } };
+ }
+ pub fn @"1d"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"1d" } };
+ }
+ pub fn @"2s"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"2s" } };
+ }
+ pub fn @"4h"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"4h" } };
+ }
+ pub fn @"8b"(ra: Alias) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .vector = .@"8b" } };
+ }
+ // Element constructors; the index type bounds the lane index per element size.
+ pub fn @"d[]"(ra: Alias, index: u1) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .element = .{ .size = .double, .index = index } } };
+ }
+ pub fn @"s[]"(ra: Alias, index: u2) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .element = .{ .size = .single, .index = index } } };
+ }
+ pub fn @"h[]"(ra: Alias, index: u3) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .element = .{ .size = .half, .index = index } } };
+ }
+ pub fn @"b[]"(ra: Alias, index: u4) Register {
+ assert(@intFromEnum(ra) >= @intFromEnum(Alias.v0) and @intFromEnum(ra) <= @intFromEnum(Alias.v31));
+ return .{ .alias = ra, .format = .{ .element = .{ .size = .byte, .index = index } } };
+ }
+
+ /// Whether this alias belongs to the vector register file; note that the
+ /// predicate aliases p0-p15 count as vector here, while fpcr/fpsr/ffr do not.
+ pub fn isVector(ra: Alias) bool {
+ return switch (ra) {
+ .r0,
+ .r1,
+ .r2,
+ .r3,
+ .r4,
+ .r5,
+ .r6,
+ .r7,
+ .r8,
+ .r9,
+ .r10,
+ .r11,
+ .r12,
+ .r13,
+ .r14,
+ .r15,
+ .r16,
+ .r17,
+ .r18,
+ .r19,
+ .r20,
+ .r21,
+ .r22,
+ .r23,
+ .r24,
+ .r25,
+ .r26,
+ .r27,
+ .r28,
+ .r29,
+ .r30,
+ .zr,
+ .sp,
+
+ .pc,
+
+ .fpcr,
+ .fpsr,
+
+ .ffr,
+ => false,
+
+ .v0,
+ .v1,
+ .v2,
+ .v3,
+ .v4,
+ .v5,
+ .v6,
+ .v7,
+ .v8,
+ .v9,
+ .v10,
+ .v11,
+ .v12,
+ .v13,
+ .v14,
+ .v15,
+ .v16,
+ .v17,
+ .v18,
+ .v19,
+ .v20,
+ .v21,
+ .v22,
+ .v23,
+ .v24,
+ .v25,
+ .v26,
+ .v27,
+ .v28,
+ .v29,
+ .v30,
+ .v31,
+
+ .p0,
+ .p1,
+ .p2,
+ .p3,
+ .p4,
+ .p5,
+ .p6,
+ .p7,
+ .p8,
+ .p9,
+ .p10,
+ .p11,
+ .p12,
+ .p13,
+ .p14,
+ .p15,
+ => true,
+ };
+ }
+
+ /// Returns the 5-bit hardware register number for this alias.
+ /// The comptime opts select which namespace is legal: GPRs require !opts.V
+ /// (with opts.sp choosing between sp and zr for number 31), vectors require
+ /// opts.V. Passing an alias outside the selected namespace (or pc, fpcr,
+ /// fpsr, ffr, or a predicate alias) is a caller bug and hits unreachable.
+ pub fn encode(ra: Alias, comptime opts: struct { sp: bool = false, V: bool = false }) Encoded {
+ return @enumFromInt(@as(u5, switch (ra) {
+ .r0 => if (opts.V) unreachable else 0,
+ .r1 => if (opts.V) unreachable else 1,
+ .r2 => if (opts.V) unreachable else 2,
+ .r3 => if (opts.V) unreachable else 3,
+ .r4 => if (opts.V) unreachable else 4,
+ .r5 => if (opts.V) unreachable else 5,
+ .r6 => if (opts.V) unreachable else 6,
+ .r7 => if (opts.V) unreachable else 7,
+ .r8 => if (opts.V) unreachable else 8,
+ .r9 => if (opts.V) unreachable else 9,
+ .r10 => if (opts.V) unreachable else 10,
+ .r11 => if (opts.V) unreachable else 11,
+ .r12 => if (opts.V) unreachable else 12,
+ .r13 => if (opts.V) unreachable else 13,
+ .r14 => if (opts.V) unreachable else 14,
+ .r15 => if (opts.V) unreachable else 15,
+ .r16 => if (opts.V) unreachable else 16,
+ .r17 => if (opts.V) unreachable else 17,
+ .r18 => if (opts.V) unreachable else 18,
+ .r19 => if (opts.V) unreachable else 19,
+ .r20 => if (opts.V) unreachable else 20,
+ .r21 => if (opts.V) unreachable else 21,
+ .r22 => if (opts.V) unreachable else 22,
+ .r23 => if (opts.V) unreachable else 23,
+ .r24 => if (opts.V) unreachable else 24,
+ .r25 => if (opts.V) unreachable else 25,
+ .r26 => if (opts.V) unreachable else 26,
+ .r27 => if (opts.V) unreachable else 27,
+ .r28 => if (opts.V) unreachable else 28,
+ .r29 => if (opts.V) unreachable else 29,
+ .r30 => if (opts.V) unreachable else 30,
+ .zr => if (opts.sp or opts.V) unreachable else 31,
+ .sp => if (opts.sp and !opts.V) 31 else unreachable,
+ .pc => unreachable,
+ .v0 => if (opts.V) 0 else unreachable,
+ .v1 => if (opts.V) 1 else unreachable,
+ .v2 => if (opts.V) 2 else unreachable,
+ .v3 => if (opts.V) 3 else unreachable,
+ .v4 => if (opts.V) 4 else unreachable,
+ .v5 => if (opts.V) 5 else unreachable,
+ .v6 => if (opts.V) 6 else unreachable,
+ .v7 => if (opts.V) 7 else unreachable,
+ .v8 => if (opts.V) 8 else unreachable,
+ .v9 => if (opts.V) 9 else unreachable,
+ .v10 => if (opts.V) 10 else unreachable,
+ .v11 => if (opts.V) 11 else unreachable,
+ .v12 => if (opts.V) 12 else unreachable,
+ .v13 => if (opts.V) 13 else unreachable,
+ .v14 => if (opts.V) 14 else unreachable,
+ .v15 => if (opts.V) 15 else unreachable,
+ .v16 => if (opts.V) 16 else unreachable,
+ .v17 => if (opts.V) 17 else unreachable,
+ .v18 => if (opts.V) 18 else unreachable,
+ .v19 => if (opts.V) 19 else unreachable,
+ .v20 => if (opts.V) 20 else unreachable,
+ .v21 => if (opts.V) 21 else unreachable,
+ .v22 => if (opts.V) 22 else unreachable,
+ .v23 => if (opts.V) 23 else unreachable,
+ .v24 => if (opts.V) 24 else unreachable,
+ .v25 => if (opts.V) 25 else unreachable,
+ .v26 => if (opts.V) 26 else unreachable,
+ .v27 => if (opts.V) 27 else unreachable,
+ .v28 => if (opts.V) 28 else unreachable,
+ .v29 => if (opts.V) 29 else unreachable,
+ .v30 => if (opts.V) 30 else unreachable,
+ .v31 => if (opts.V) 31 else unreachable,
+ .fpcr, .fpsr => unreachable,
+ .p0, .p1, .p2, .p3, .p4, .p5, .p6, .p7, .p8, .p9, .p10, .p11, .p12, .p13, .p14, .p15 => unreachable,
+ .ffr => unreachable,
+ }));
+ }
+ };
+
+ /// True when the register's alias is one of the vector (v0-v31) or
+ /// predicate (p0-p15) aliases; see Alias.isVector for the classification.
+ pub fn isVector(reg: Register) bool {
+ return Alias.isVector(reg.alias);
+ }
+
+ /// Returns the register's size in bytes, or null when it has no fixed size
+ /// (scalable and predicate registers). Must not be called on a size-less
+ /// .alias format.
+ ///
+ /// Fixed to switch on the Format field names the constructors actually
+ /// create: `.scalar` holds scalar sizes (the old code matched a nonexistent
+ /// `.vector` payload with `.word` instead of `.half`), `.vector` holds the
+ /// arrangements (old code: `.arrangement`), and elements re-dispatch through
+ /// `.scalar`.
+ pub fn size(reg: Register) ?u5 {
+ return format: switch (reg.format) {
+ .alias => unreachable,
+ .integer => |sf| switch (sf) {
+ .word => 4,
+ .doubleword => 8,
+ },
+ .scalar => |vs| switch (vs) {
+ .byte => 1,
+ .half => 2,
+ .single => 4,
+ .double => 8,
+ .quad => 16,
+ .scalable, .predicate => null,
+ },
+ .vector => |arrangement| switch (arrangement) {
+ .@"2d", .@"4s", .@"8h", .@"16b" => 16,
+ .@"1d", .@"2s", .@"4h", .@"8b" => 8,
+ },
+ // A single element has the size of its element type.
+ .element => |element| continue :format .{ .scalar = element.size },
+ };
+ }
+
+ /// Parses an assembler register name (e.g. "x3", "w30", "sp", "q7", "ip1")
+ /// into a Register, or returns null for unrecognized names. Numbered names
+ /// of register 31 ("r31"/"x31"/"w31") are rejected; number 31 must be
+ /// spelled "sp", "wsp", "xzr", or "wzr".
+ /// NOTE(review): SVE "z*"/"p*" register names are not handled here even
+ /// though the corresponding constants exist — confirm this is intentional.
+ pub fn parse(reg: []const u8) ?Register {
+ return if (reg.len == 0) null else switch (reg[0]) {
+ else => null,
+ 'r' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| switch (n) {
+ 0...30 => .{
+ .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+ .format = .alias,
+ },
+ 31 => null,
+ } else |_| null,
+ 'x' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| switch (n) {
+ 0...30 => .{
+ .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+ .format = .{ .integer = .doubleword },
+ },
+ 31 => null,
+ } else |_| if (std.mem.eql(u8, reg, "xzr")) .xzr else null,
+ 'w' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| switch (n) {
+ 0...30 => .{
+ .alias = @enumFromInt(@intFromEnum(Alias.r0) + n),
+ .format = .{ .integer = .word },
+ },
+ 31 => null,
+ } else |_| if (std.mem.eql(u8, reg, "wzr"))
+ .wzr
+ else if (std.mem.eql(u8, reg, "wsp"))
+ .wsp
+ else
+ null,
+ // "ip" is accepted as a synonym for "ip0".
+ 'i' => return if (std.mem.eql(u8, reg, "ip") or std.mem.eql(u8, reg, "ip0"))
+ .ip0
+ else if (std.mem.eql(u8, reg, "ip1"))
+ .ip1
+ else
+ null,
+ 'f' => return if (std.mem.eql(u8, reg, "fp")) .fp else null,
+ 'p' => return if (std.mem.eql(u8, reg, "pc")) .pc else null,
+ 'v' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+ .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+ .format = .alias,
+ } else |_| null,
+ 'q' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+ .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+ .format = .{ .scalar = .quad },
+ } else |_| null,
+ 'd' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+ .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+ .format = .{ .scalar = .double },
+ } else |_| null,
+ // 's' is both the scalar single-precision prefix and the name "sp".
+ 's' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+ .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+ .format = .{ .scalar = .single },
+ } else |_| if (std.mem.eql(u8, reg, "sp")) .sp else null,
+ 'h' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+ .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+ .format = .{ .scalar = .half },
+ } else |_| null,
+ 'b' => if (std.fmt.parseInt(u5, reg[1..], 10)) |n| .{
+ .alias = @enumFromInt(@intFromEnum(Alias.v0) + n),
+ .format = .{ .scalar = .byte },
+ } else |_| null,
+ };
+ }
+
+ /// Convenience wrapper: formats the register in lowercase.
+ pub fn fmt(reg: Register) aarch64.Disassemble.RegisterFormatter {
+ return reg.fmtCase(.lower);
+ }
+ /// Returns a formatter that renders the register name in the requested case.
+ pub fn fmtCase(reg: Register, case: aarch64.Disassemble.Case) aarch64.Disassemble.RegisterFormatter {
+ return .{ .reg = reg, .case = case };
+ }
+};
+
+/// C1.2.4 Condition code
+pub const ConditionCode = enum(u4) {
+ /// integer: Equal
+ /// floating-point: Equal
+ /// Z == 1
+ eq = 0b0000,
+ /// integer: Not equal
+ /// floating-point: Not equal or unordered
+ /// Z == 0
+ ne = 0b0001,
+ /// integer: Unsigned higher or same
+ /// floating-point: Greater than, equal, or unordered
+ /// C == 1
+ hs = 0b0010,
+ /// integer: Unsigned lower
+ /// floating-point: Less than
+ /// C == 0
+ lo = 0b0011,
+ /// integer: Minus, negative
+ /// floating-point: Less than
+ /// N == 1
+ mi = 0b0100,
+ /// integer: Plus, positive or zero
+ /// floating-point: Greater than, equal, or unordered
+ /// N == 0
+ pl = 0b0101,
+ /// integer: Overflow
+ /// floating-point: Unordered
+ /// V == 1
+ vs = 0b0110,
+ /// integer: No overflow
+ /// floating-point: Ordered
+ /// V == 0
+ vc = 0b0111,
+ /// integer: Unsigned higher
+ /// floating-point: Greater than, or unordered
+ /// C == 1 and Z == 0
+ hi = 0b1000,
+ /// integer: Unsigned lower or same
+ /// floating-point: Less than or equal
+ /// C == 0 or Z == 1
+ ls = 0b1001,
+ /// integer: Signed greater than or equal
+ /// floating-point: Greater than or equal
+ /// N == V
+ ge = 0b1010,
+ /// integer: Signed less than
+ /// floating-point: Less than, or unordered
+ /// N != V
+ lt = 0b1011,
+ /// integer: Signed greater than
+ /// floating-point: Greater than
+ /// Z == 0 and N == V
+ gt = 0b1100,
+ /// integer: Signed less than or equal
+ /// floating-point: Less than, equal, or unordered
+ /// Z == 1 or N != V
+ le = 0b1101,
+ /// integer: Always
+ /// floating-point: Always
+ /// true
+ al = 0b1110,
+ /// integer: Always
+ /// floating-point: Always
+ /// true
+ nv = 0b1111,
+ /// Carry set
+ /// C == 1
+ pub const cs: ConditionCode = .hs;
+ /// Carry clear
+ /// C == 0
+ pub const cc: ConditionCode = .lo;
+
+ /// Returns the opposite condition by flipping the low encoding bit
+ /// (eq<->ne, hs<->lo, ...). Note al and nv invert into each other even
+ /// though both are documented above as "Always".
+ pub fn invert(cond: ConditionCode) ConditionCode {
+ return @enumFromInt(@intFromEnum(cond) ^ 0b0001);
+ }
+};
+
+/// C4.1 A64 instruction set encoding
+pub const Instruction = packed union {
+ group: Group,
+ reserved: Reserved,
+ sme: Sme,
+ sve: Sve,
+ data_processing_immediate: DataProcessingImmediate,
+ branch_exception_generating_system: BranchExceptionGeneratingSystem,
+ load_store: LoadStore,
+ data_processing_register: DataProcessingRegister,
+ data_processing_vector: DataProcessingVector,
+
+ /// Table C4-1 Main encoding table for the A64 instruction set
+ // Packed-struct fields are LSB-first: encoded0 covers bits 0-24, op1 bits
+ // 25-28, encoded29 bits 29-30, op0 bit 31.
+ pub const Group = packed struct {
+ encoded0: u25,
+ op1: u4,
+ encoded29: u2,
+ op0: u1,
+ };
+
+ /// C4.1.1 Reserved
+ pub const Reserved = packed union {
+ group: @This().Group,
+ udf: Udf,
+
+ /// Table C4-2 Encoding table for the Reserved group
+ pub const Group = packed struct {
+ encoded0: u16,
+ op1: u9,
+ decoded25: u4 = 0b0000,
+ op0: u2,
+ decoded31: u1 = 0b0,
+ };
+
+ /// C6.2.387 UDF
+ pub const Udf = packed struct {
+ imm16: u16,
+ decoded16: u16 = 0b0000000000000000,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ udf: Udf,
+ };
+ /// Decodes per Table C4-2: op0 == 0b00 with op1 == 0 selects UDF;
+ /// everything else in this group is unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op0) {
+ 0b00 => switch (inst.group.op1) {
+ 0b000000000 => .{ .udf = inst.udf },
+ else => .unallocated,
+ },
+ else => .unallocated,
+ };
+ }
+ };
+
+ /// C4.1.2 SME encodings
+ pub const Sme = packed union {
+ group: @This().Group,
+
+ /// Table C4-3 Encodings table for the SME encodings group
+ // decoded31 = 1 and decoded25 = 0b0000 are the fixed bits that select
+ // this group in the main encoding table.
+ pub const Group = packed struct {
+ encoded0: u2,
+ op2: u3,
+ encoded5: u5,
+ op1: u15,
+ decoded25: u4 = 0b0000,
+ op0: u2,
+ decoded31: u1 = 0b1,
+ };
+ };
+
+ /// C4.1.30 SVE encodings
+ pub const Sve = packed union {
+ group: @This().Group,
+
+ /// Table C4-31 Encoding table for the SVE encodings group
+ // decoded25 = 0b0010 is the fixed bit pattern that selects this group.
+ pub const Group = packed struct {
+ encoded0: u4,
+ op2: u1,
+ encoded5: u5,
+ op1: u15,
+ decoded25: u4 = 0b0010,
+ op0: u3,
+ };
+ };
+
+ /// C4.1.86 Data Processing -- Immediate
+ pub const DataProcessingImmediate = packed union {
+ group: @This().Group,
+ pc_relative_addressing: PcRelativeAddressing,
+ add_subtract_immediate: AddSubtractImmediate,
+ add_subtract_immediate_with_tags: AddSubtractImmediateWithTags,
+ logical_immediate: LogicalImmediate,
+ move_wide_immediate: MoveWideImmediate,
+ bitfield: Bitfield,
+ extract: Extract,
+
+ /// Table C4-87 Encoding table for the Data Processing -- Immediate group
+ // decoded26 = 0b100 (bits 26-28) is fixed; op0 (bits 23-25) selects the
+ // specific immediate instruction class.
+ pub const Group = packed struct {
+ encoded0: u23,
+ op0: u3,
+ decoded26: u3 = 0b100,
+ encoded29: u3,
+ };
+
+ /// PC-rel. addressing
+ // The immediate is split across the word: immlo holds the low 2 bits
+ // and immhi the high 19 (signed); op distinguishes ADR from ADRP.
+ pub const PcRelativeAddressing = packed union {
+ group: @This().Group,
+ adr: Adr,
+ adrp: Adrp,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ immhi: i19,
+ decoded24: u5 = 0b10000,
+ immlo: u2,
+ op: Op,
+ };
+
+ /// C6.2.10 ADR
+ pub const Adr = packed struct {
+ Rd: Register.Encoded,
+ immhi: i19,
+ decoded24: u5 = 0b10000,
+ immlo: u2,
+ op: Op = .adr,
+ };
+
+ /// C6.2.11 ADRP
+ pub const Adrp = packed struct {
+ Rd: Register.Encoded,
+ immhi: i19,
+ decoded24: u5 = 0b10000,
+ immlo: u2,
+ op: Op = .adrp,
+ };
+
+ pub const Op = enum(u1) {
+ adr = 0b0,
+ adrp = 0b1,
+ };
+ };
+
+ /// Add/subtract (immediate)
+ // The four variants share one layout and differ only in the defaulted
+ // S (set flags) and op (add/sub) fields; sh shifts imm12 left by 0 or 12.
+ pub const AddSubtractImmediate = packed union {
+ group: @This().Group,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ sh: Shift,
+ decoded23: u6 = 0b100010,
+ S: bool,
+ op: AddSubtractOp,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.4 ADD (immediate)
+ pub const Add = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ sh: Shift,
+ decoded23: u6 = 0b100010,
+ S: bool = false,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.8 ADDS (immediate)
+ pub const Adds = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ sh: Shift,
+ decoded23: u6 = 0b100010,
+ S: bool = true,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.357 SUB (immediate)
+ pub const Sub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ sh: Shift,
+ decoded23: u6 = 0b100010,
+ S: bool = false,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.363 SUBS (immediate)
+ pub const Subs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ sh: Shift,
+ decoded23: u6 = 0b100010,
+ S: bool = true,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// Shift amount applied to imm12: none or left by 12 bits.
+ pub const Shift = enum(u1) {
+ @"0" = 0b0,
+ @"12" = 0b1,
+ };
+ };
+
+ /// Add/subtract (immediate, with tags)
+ // Layout only; no named instruction variants are defined for this class
+ // here.
+ pub const AddSubtractImmediateWithTags = packed union {
+ group: @This().Group,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ uimm4: u4,
+ op3: u2,
+ uimm6: u6,
+ o2: u1,
+ decoded23: u6 = 0b100011,
+ S: bool,
+ op: AddSubtractOp,
+ sf: Register.IntegerSize,
+ };
+ };
+
+ /// Logical (immediate)
+ // The four variants share one layout and differ only in the opc field;
+ // the immediate is a bitmask-encoded value (see Bitmask).
+ pub const LogicalImmediate = packed union {
+ group: @This().Group,
+ @"and": And,
+ orr: Orr,
+ eor: Eor,
+ ands: Ands,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100100,
+ opc: LogicalOpc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.12 AND (immediate)
+ pub const And = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100100,
+ opc: LogicalOpc = .@"and",
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.240 ORR (immediate)
+ pub const Orr = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100100,
+ opc: LogicalOpc = .orr,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.119 EOR (immediate)
+ pub const Eor = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100100,
+ opc: LogicalOpc = .eor,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.14 ANDS (immediate)
+ pub const Ands = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100100,
+ opc: LogicalOpc = .ands,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ @"and": And,
+ orr: Orr,
+ eor: Eor,
+ ands: Ands,
+ };
+ /// Decodes by opc, returning .unallocated when the bitmask immediate is
+ /// not valid for the selected register width (imm.validImmediate).
+ pub fn decode(inst: @This()) @This().Decoded {
+ return if (!inst.group.imm.validImmediate(inst.group.sf))
+ .unallocated
+ else switch (inst.group.opc) {
+ .@"and" => .{ .@"and" = inst.@"and" },
+ .orr => .{ .orr = inst.orr },
+ .eor => .{ .eor = inst.eor },
+ .ands => .{ .ands = inst.ands },
+ };
+ }
+ };
+
+ /// Move wide (immediate)
+ pub const MoveWideImmediate = packed union {
+ // All variants overlay the same 32-bit word; `group` is the generic view.
+ group: @This().Group,
+ movn: Movn,
+ movz: Movz,
+ movk: Movk,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ imm16: u16,
+ // Which 16-bit lane of the destination imm16 targets.
+ hw: Hw,
+ decoded23: u6 = 0b100101,
+ opc: Opc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.226 MOVN
+ pub const Movn = packed struct {
+ Rd: Register.Encoded,
+ imm16: u16,
+ hw: Hw,
+ decoded23: u6 = 0b100101,
+ opc: Opc = .movn,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.227 MOVZ
+ pub const Movz = packed struct {
+ Rd: Register.Encoded,
+ imm16: u16,
+ hw: Hw,
+ decoded23: u6 = 0b100101,
+ opc: Opc = .movz,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.225 MOVK
+ pub const Movk = packed struct {
+ Rd: Register.Encoded,
+ imm16: u16,
+ hw: Hw,
+ decoded23: u6 = 0b100101,
+ opc: Opc = .movk,
+ sf: Register.IntegerSize,
+ };
+
+ /// Shift of imm16 in multiples of 16 bits.
+ pub const Hw = enum(u2) {
+ @"0" = 0b00,
+ @"16" = 0b01,
+ @"32" = 0b10,
+ @"48" = 0b11,
+
+ /// Shift amount in bits.
+ pub fn int(hw: Hw) u6 {
+ return switch (hw) {
+ .@"0" => 0,
+ .@"16" => 16,
+ .@"32" => 32,
+ .@"48" => 48,
+ };
+ }
+
+ /// Minimum register size able to hold a lane at this shift:
+ /// shifts of 32/48 only exist for 64-bit registers.
+ pub fn sf(hw: Hw) Register.IntegerSize {
+ return switch (hw) {
+ .@"0", .@"16" => .word,
+ .@"32", .@"48" => .doubleword,
+ };
+ }
+ };
+
+ // opc 0b01 is unallocated and is caught by the `_` prong below.
+ pub const Opc = enum(u2) {
+ movn = 0b00,
+ movz = 0b10,
+ movk = 0b11,
+ _,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ movn: Movn,
+ movz: Movz,
+ movk: Movk,
+ };
+ /// Classifies by `opc`; 32-bit encodings with hw = 32/48 are unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return if (inst.group.sf == .word and inst.group.hw.sf() == .doubleword)
+ .unallocated
+ else switch (inst.group.opc) {
+ _ => .unallocated,
+ .movn => .{ .movn = inst.movn },
+ .movz => .{ .movz = inst.movz },
+ .movk => .{ .movk = inst.movk },
+ };
+ }
+ };
+
+ /// Bitfield
+ pub const Bitfield = packed union {
+ // All variants overlay the same 32-bit word; `group` is the generic view.
+ group: @This().Group,
+ sbfm: Sbfm,
+ bfm: Bfm,
+ ubfm: Ubfm,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ // N:immr:imms rotate/width fields (see `Bitmask`).
+ imm: Bitmask,
+ decoded23: u6 = 0b100110,
+ opc: Opc,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Sbfm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100110,
+ opc: Opc = .sbfm,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Bfm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100110,
+ opc: Opc = .bfm,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Ubfm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm: Bitmask,
+ decoded23: u6 = 0b100110,
+ opc: Opc = .ubfm,
+ sf: Register.IntegerSize,
+ };
+
+ // opc 0b11 is unallocated and is caught by the `_` prong below.
+ pub const Opc = enum(u2) {
+ sbfm = 0b00,
+ bfm = 0b01,
+ ubfm = 0b10,
+ _,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ sbfm: Sbfm,
+ bfm: Bfm,
+ ubfm: Ubfm,
+ };
+ /// Classifies by `opc`; encodings whose N/immr/imms combination is
+ /// invalid for a bitfield decode as unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return if (!inst.group.imm.validBitfield(inst.group.sf))
+ .unallocated
+ else switch (inst.group.opc) {
+ _ => .unallocated,
+ .sbfm => .{ .sbfm = inst.sbfm },
+ .bfm => .{ .bfm = inst.bfm },
+ .ubfm => .{ .ubfm = inst.ubfm },
+ };
+ }
+ };
+
+ /// Extract
+ pub const Extract = packed union {
+ group: @This().Group,
+ extr: Extr,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imms: u6,
+ Rm: Register.Encoded,
+ o0: u1,
+ N: Register.IntegerSize,
+ decoded23: u6 = 0b100111,
+ op21: u2,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Extr = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imms: u6,
+ Rm: Register.Encoded,
+ o0: u1 = 0b0,
+ N: Register.IntegerSize,
+ decoded23: u6 = 0b100111,
+ op21: u2 = 0b00,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ extr: Extr,
+ };
+ /// Only op21 == 0b00, o0 == 0b0 encodes EXTR; additionally N must match
+ /// sf, and for 32-bit operation the rotate amount (imms) must be < 32.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op21) {
+ 0b01, 0b10...0b11 => .unallocated,
+ 0b00 => switch (inst.group.o0) {
+ 0b1 => .unallocated,
+ 0b0 => if ((inst.group.sf == .word and @as(u1, @truncate(inst.group.imms >> 5)) == 0b1) or
+ inst.group.sf != inst.group.N)
+ .unallocated
+ else
+ .{ .extr = inst.extr },
+ },
+ };
+ }
+ };
+
+ /// The N:immr:imms bitmask-immediate encoding shared by the logical
+ /// (immediate) and bitfield instruction classes.
+ pub const Bitmask = packed struct {
+ imms: u6,
+ immr: u6,
+ N: Register.IntegerSize,
+
+ /// N concatenated with NOT(imms); the position of its highest set bit
+ /// determines the element size, as in the ARM DecodeBitMasks pseudocode.
+ fn lenHsb(bitmask: Bitmask) u7 {
+ return @bitCast(packed struct {
+ not_imms: u6,
+ N: Register.IntegerSize,
+ }{ .not_imms = ~bitmask.imms, .N = bitmask.N });
+ }
+
+ /// Whether this encodes a valid logical immediate.
+ fn validImmediate(bitmask: Bitmask, sf: Register.IntegerSize) bool {
+ if (sf == .word and bitmask.N == .doubleword) return false;
+ const len_hsb = bitmask.lenHsb();
+ // `(x -% 1) & x == 0` iff x is zero or a power of two: zero means no
+ // set bit (reserved), a power of two means imms selects the all-ones
+ // element pattern, which is also reserved.
+ return (len_hsb -% 1) & len_hsb != 0b0_000000;
+ }
+
+ /// Whether this encodes a valid bitfield rotate/width pair.
+ fn validBitfield(bitmask: Bitmask, sf: Register.IntegerSize) bool {
+ if (sf != bitmask.N) return false;
+ // 32-bit forms require immr<5> == imms<5> == 0.
+ if (sf == .word and (@as(u1, @truncate(bitmask.immr >> 5)) != 0b0 or
+ @as(u1, @truncate(bitmask.imms >> 5)) != 0b0)) return false;
+ const len_hsb = bitmask.lenHsb();
+ return len_hsb >= 0b0_000010;
+ }
+
+ /// Mirrors the ARM DecodeBitMasks pseudocode: returns the replicated
+ /// wmask and tmask for a valid encoding. Caller must validate first.
+ fn decode(bitmask: Bitmask, sf: Register.IntegerSize) struct { u64, u64 } {
+ // Element size is the power of two selected by the highest set bit.
+ const esize = @as(u7, 1 << 6) >> @clz(bitmask.lenHsb());
+ const levels: u6 = @intCast(esize - 1);
+ const s = bitmask.imms & levels;
+ const r = bitmask.immr & levels;
+ const d = (s -% r) & levels;
+ const welem = @as(u64, std.math.maxInt(u64)) >> (63 - s);
+ const telem = @as(u64, std.math.maxInt(u64)) >> (63 - d);
+ const emask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - esize);
+ // Multiplying by `rmask` replicates an esize-bit element across 64 bits.
+ const rmask = @divExact(std.math.maxInt(u64), emask);
+ const wmask = std.math.rotr(u64, welem * rmask, r);
+ const tmask = telem * rmask;
+ return switch (sf) {
+ .word => .{ @as(u32, @truncate(wmask)), @as(u32, @truncate(tmask)) },
+ .doubleword => .{ wmask, tmask },
+ };
+ }
+
+ /// Decodes the logical-immediate value; asserts the encoding is valid.
+ pub fn decodeImmediate(bitmask: Bitmask, sf: Register.IntegerSize) u64 {
+ assert(bitmask.validImmediate(sf));
+ const imm, _ = bitmask.decode(sf);
+ return imm;
+ }
+
+ /// Decodes the bitfield wmask/tmask pair; asserts the encoding is valid.
+ pub fn decodeBitfield(bitmask: Bitmask, sf: Register.IntegerSize) struct { u64, u64 } {
+ assert(bitmask.validBitfield(sf));
+ return bitmask.decode(sf);
+ }
+
+ /// Whether a MOVZ/MOVN is preferred over ORR (immediate) for this value,
+ /// i.e. the immediate fits a single 16-bit lane.
+ pub fn moveWidePreferred(bitmask: Bitmask, sf: Register.IntegerSize) bool {
+ const s = bitmask.imms;
+ const r = bitmask.immr;
+ const width: u7 = switch (sf) {
+ .word => 32,
+ .doubleword => 64,
+ };
+ if (sf != bitmask.N) return false;
+ if (sf == .word and @as(u1, @truncate(s >> 5)) != 0b0) return false;
+ if (s < 16) return (-%r % 16) <= (15 - s);
+ if (s >= width - 15) return (r % 16) <= (s - (width - 15));
+ return false;
+ }
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ pc_relative_addressing: PcRelativeAddressing,
+ add_subtract_immediate: AddSubtractImmediate,
+ add_subtract_immediate_with_tags: AddSubtractImmediateWithTags,
+ logical_immediate: LogicalImmediate,
+ move_wide_immediate: MoveWideImmediate,
+ bitfield: Bitfield,
+ extract: Extract,
+ };
+ /// Classifies a data-processing (immediate) instruction by its op0 field
+ /// and reinterprets it as the matching subclass. op0 is exhaustive, so no
+ /// encoding in this group is unallocated at this level.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op0) {
+ 0b000, 0b001 => .{ .pc_relative_addressing = inst.pc_relative_addressing },
+ 0b010 => .{ .add_subtract_immediate = inst.add_subtract_immediate },
+ 0b011 => .{ .add_subtract_immediate_with_tags = inst.add_subtract_immediate_with_tags },
+ 0b100 => .{ .logical_immediate = inst.logical_immediate },
+ 0b101 => .{ .move_wide_immediate = inst.move_wide_immediate },
+ 0b110 => .{ .bitfield = inst.bitfield },
+ 0b111 => .{ .extract = inst.extract },
+ };
+ }
+ };
+
+ /// C4.1.87 Branches, Exception Generating and System instructions
+ pub const BranchExceptionGeneratingSystem = packed union {
+ group: @This().Group,
+ conditional_branch_immediate: ConditionalBranchImmediate,
+ exception_generating: ExceptionGenerating,
+ system_register_argument: SystemRegisterArgument,
+ hints: Hints,
+ barriers: Barriers,
+ pstate: Pstate,
+ system_result: SystemResult,
+ system: System,
+ system_register_move: SystemRegisterMove,
+ unconditional_branch_register: UnconditionalBranchRegister,
+ unconditional_branch_immediate: UnconditionalBranchImmediate,
+ compare_branch_immediate: CompareBranchImmediate,
+ test_branch_immediate: TestBranchImmediate,
+
+ /// Table C4-88 Encoding table for the Branches, Exception Generating and System instructions group
+ pub const Group = packed struct {
+ // op0/op1/op2 are the selector fields used by `decode` below;
+ // encoded5 covers the bits not needed for classification.
+ op2: u5,
+ encoded5: u7,
+ op1: u14,
+ decoded26: u3 = 0b101,
+ op0: u3,
+ };
+
+ /// Conditional branch (immediate)
+ pub const ConditionalBranchImmediate = packed union {
+ group: @This().Group,
+ b: B,
+ bc: Bc,
+
+ pub const Group = packed struct {
+ cond: ConditionCode,
+ o0: u1,
+ // PC-relative offset in units of 4-byte instructions.
+ imm19: i19,
+ o1: u1,
+ decoded25: u7 = 0b0101010,
+ };
+
+ /// C6.2.26 B.cond
+ pub const B = packed struct {
+ cond: ConditionCode,
+ o0: u1 = 0b0,
+ imm19: i19,
+ o1: u1 = 0b0,
+ decoded25: u7 = 0b0101010,
+ };
+
+ /// C6.2.27 BC.cond
+ pub const Bc = packed struct {
+ cond: ConditionCode,
+ o0: u1 = 0b1,
+ imm19: i19,
+ o1: u1 = 0b0,
+ decoded25: u7 = 0b0101010,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ b: B,
+ bc: Bc,
+ };
+ /// o1 must be 0; o0 selects B.cond vs. BC.cond.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.o1) {
+ 0b0 => switch (inst.group.o0) {
+ 0b0 => .{ .b = inst.b },
+ 0b1 => .{ .bc = inst.bc },
+ },
+ 0b1 => .unallocated,
+ };
+ }
+ };
+
+ /// Exception generating
+ pub const ExceptionGenerating = packed union {
+ group: @This().Group,
+ svc: Svc,
+ hvc: Hvc,
+ smc: Smc,
+ brk: Brk,
+ hlt: Hlt,
+ tcancel: Tcancel,
+ dcps1: Dcps1,
+ dcps2: Dcps2,
+ dcps3: Dcps3,
+
+ pub const Group = packed struct {
+ // LL/op2/opc select the specific instruction; imm16 is its payload.
+ LL: u2,
+ op2: u3,
+ imm16: u16,
+ opc: u3,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.365 SVC
+ pub const Svc = packed struct {
+ decoded0: u2 = 0b01,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b000,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.128 HVC
+ pub const Hvc = packed struct {
+ decoded0: u2 = 0b10,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b000,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.283 SMC
+ pub const Smc = packed struct {
+ decoded0: u2 = 0b11,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b000,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.40 BRK
+ pub const Brk = packed struct {
+ decoded0: u2 = 0b00,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b001,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.127 HLT
+ pub const Hlt = packed struct {
+ decoded0: u2 = 0b00,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b010,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.376 TCANCEL
+ pub const Tcancel = packed struct {
+ decoded0: u2 = 0b00,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b011,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.110 DCPS1
+ pub const Dcps1 = packed struct {
+ LL: u2 = 0b01,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b101,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.111 DCPS2
+ pub const Dcps2 = packed struct {
+ LL: u2 = 0b10,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b101,
+ decoded24: u8 = 0b11010100,
+ };
+
+ /// C6.2.112 DCPS3
+ pub const Dcps3 = packed struct {
+ LL: u2 = 0b11,
+ decoded2: u3 = 0b000,
+ imm16: u16,
+ decoded21: u3 = 0b101,
+ decoded24: u8 = 0b11010100,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ svc: Svc,
+ hvc: Hvc,
+ smc: Smc,
+ brk: Brk,
+ hlt: Hlt,
+ tcancel: Tcancel,
+ dcps1: Dcps1,
+ dcps2: Dcps2,
+ dcps3: Dcps3,
+ };
+ /// All allocated encodings require op2 == 0b000; opc then selects the
+ /// instruction family and LL the specific variant.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op2) {
+ 0b001 => .unallocated,
+ 0b010...0b011 => .unallocated,
+ 0b100...0b111 => .unallocated,
+ 0b000 => switch (inst.group.opc) {
+ 0b000 => switch (inst.group.LL) {
+ 0b00 => .unallocated,
+ 0b01 => .{ .svc = inst.svc },
+ 0b10 => .{ .hvc = inst.hvc },
+ 0b11 => .{ .smc = inst.smc },
+ },
+ 0b001 => switch (inst.group.LL) {
+ 0b01 => .unallocated,
+ 0b00 => .{ .brk = inst.brk },
+ 0b10...0b11 => .unallocated,
+ },
+ 0b010 => switch (inst.group.LL) {
+ 0b01 => .unallocated,
+ 0b00 => .{ .hlt = inst.hlt },
+ 0b10...0b11 => .unallocated,
+ },
+ 0b011 => switch (inst.group.LL) {
+ 0b00 => .{ .tcancel = inst.tcancel },
+ 0b01 => .unallocated,
+ 0b10...0b11 => .unallocated,
+ },
+ 0b100 => .unallocated,
+ 0b101 => switch (inst.group.LL) {
+ 0b00 => .unallocated,
+ 0b01 => .{ .dcps1 = inst.dcps1 },
+ 0b10 => .{ .dcps2 = inst.dcps2 },
+ 0b11 => .{ .dcps3 = inst.dcps3 },
+ },
+ 0b110 => .unallocated,
+ 0b111 => .unallocated,
+ },
+ };
+ }
+ };
+
+ /// System instructions with register argument
+ pub const SystemRegisterArgument = packed struct {
+ // Only the generic layout is modeled for this class.
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ decoded12: u20 = 0b11010101000000110001,
+ };
+
+ /// Hints
+ pub const Hints = packed union {
+ group: @This().Group,
+ hint: Hint,
+ nop: Nop,
+ yield: Yield,
+ wfe: Wfe,
+ wfi: Wfi,
+ sev: Sev,
+ sevl: Sevl,
+
+ pub const Group = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3,
+ CRm: u4,
+ decoded12: u20 = 0b11010101000000110010,
+ };
+
+ /// C6.2.126 HINT
+ pub const Hint = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3,
+ CRm: u4,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.238 NOP
+ pub const Nop = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3 = 0b000,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.402 YIELD
+ pub const Yield = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3 = 0b001,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.396 WFE
+ pub const Wfe = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3 = 0b010,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.398 WFI
+ pub const Wfi = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3 = 0b011,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.280 SEV
+ pub const Sev = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3 = 0b100,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.281 SEVL
+ pub const Sevl = packed struct {
+ decoded0: u5 = 0b11111,
+ op2: u3 = 0b101,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0010,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ // No unallocated case: any unrecognized CRm/op2 combination is the
+ // generic HINT instruction, which architecturally behaves as a NOP.
+ pub const Decoded = union(enum) {
+ hint: Hint,
+ nop: Nop,
+ yield: Yield,
+ wfe: Wfe,
+ wfi: Wfi,
+ sev: Sev,
+ sevl: Sevl,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.CRm) {
+ else => .{ .hint = inst.hint },
+ 0b0000 => switch (inst.group.op2) {
+ else => .{ .hint = inst.hint },
+ 0b000 => .{ .nop = inst.nop },
+ 0b001 => .{ .yield = inst.yield },
+ 0b010 => .{ .wfe = inst.wfe },
+ 0b011 => .{ .wfi = inst.wfi },
+ 0b100 => .{ .sev = inst.sev },
+ 0b101 => .{ .sevl = inst.sevl },
+ },
+ };
+ }
+ };
+
+ /// Barriers
+ pub const Barriers = packed union {
+ group: @This().Group,
+ clrex: Clrex,
+ dsb: Dsb,
+ dmb: Dmb,
+ isb: Isb,
+ sb: Sb,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ decoded12: u4 = 0b0011,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.56 CLREX
+ pub const Clrex = packed struct {
+ // Rt is fixed to 0b11111 (no register operand); CRm is the immediate.
+ Rt: Register.Encoded = @enumFromInt(0b11111),
+ op2: u3 = 0b010,
+ CRm: u4,
+ decoded12: u4 = 0b0011,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.116 DSB
+ pub const Dsb = packed struct {
+ Rt: Register.Encoded = @enumFromInt(0b11111),
+ opc: u2 = 0b00,
+ decoded7: u1 = 0b1,
+ // CRm carries the barrier domain/type option.
+ CRm: Option,
+ decoded12: u4 = 0b0011,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.114 DMB
+ pub const Dmb = packed struct {
+ Rt: Register.Encoded = @enumFromInt(0b11111),
+ opc: u2 = 0b01,
+ decoded7: u1 = 0b1,
+ CRm: Option,
+ decoded12: u4 = 0b0011,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.131 ISB
+ pub const Isb = packed struct {
+ Rt: Register.Encoded = @enumFromInt(0b11111),
+ opc: u2 = 0b10,
+ decoded7: u1 = 0b1,
+ CRm: Option,
+ decoded12: u4 = 0b0011,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.264 SB
+ pub const Sb = packed struct {
+ Rt: Register.Encoded = @enumFromInt(0b11111),
+ opc: u2 = 0b11,
+ decoded7: u1 = 0b1,
+ CRm: u4 = 0b0000,
+ decoded12: u4 = 0b0011,
+ decoded16: u3 = 0b011,
+ decoded19: u2 = 0b00,
+ decoded21: u1 = 0b0,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// Barrier shareability-domain and access-type option; the `_` prong
+ /// admits the remaining encodings, which are treated as full-system.
+ pub const Option = enum(u4) {
+ oshld = 0b0001,
+ oshst = 0b0010,
+ osh = 0b0011,
+ nshld = 0b0101,
+ nshst = 0b0110,
+ nsh = 0b0111,
+ ishld = 0b1001,
+ ishst = 0b1010,
+ ish = 0b1011,
+ ld = 0b1101,
+ st = 0b1110,
+ sy = 0b1111,
+ _,
+ };
+ };
+
+ /// PSTATE
+ pub const Pstate = packed struct {
+ // Only the generic layout is modeled for this class.
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ decoded12: u4 = 0b0100,
+ op1: u3,
+ decoded19: u13 = 0b1101010100000,
+ };
+
+ /// System with result
+ pub const SystemResult = packed struct {
+ // Only the generic layout is modeled for this class.
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ decoded19: u13 = 0b1101010100100,
+ };
+
+ /// System instructions
+ pub const System = packed union {
+ group: @This().Group,
+ sys: Sys,
+ sysl: Sysl,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ decoded19: u2 = 0b01,
+ // Direction bit: SYS writes, SYSL reads back into Rt.
+ L: L,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.372 SYS
+ pub const Sys = packed struct {
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ decoded19: u2 = 0b01,
+ L: L = .sys,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.373 SYSL
+ pub const Sysl = packed struct {
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ decoded19: u2 = 0b01,
+ L: L = .sysl,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ const L = enum(u1) {
+ sys = 0b0,
+ sysl = 0b1,
+ };
+
+ pub const Decoded = union(enum) {
+ sys: Sys,
+ sysl: Sysl,
+ };
+ /// The L bit fully classifies this group; no unallocated encodings.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.L) {
+ .sys => .{ .sys = inst.sys },
+ .sysl => .{ .sysl = inst.sysl },
+ };
+ }
+ };
+
+ /// System register move
+ pub const SystemRegisterMove = packed union {
+ group: @This().Group,
+ msr: Msr,
+ mrs: Mrs,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ // o0:op1:CRn:CRm:op2 together name the system register.
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ o0: u1,
+ decoded20: u1 = 0b1,
+ // Direction bit: MSR writes the system register, MRS reads it.
+ L: L,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.230 MSR (register)
+ pub const Msr = packed struct {
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ o0: u1,
+ decoded20: u1 = 0b1,
+ L: L = .msr,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ /// C6.2.228 MRS
+ pub const Mrs = packed struct {
+ Rt: Register.Encoded,
+ op2: u3,
+ CRm: u4,
+ CRn: u4,
+ op1: u3,
+ o0: u1,
+ decoded20: u1 = 0b1,
+ L: L = .mrs,
+ decoded22: u10 = 0b1101010100,
+ };
+
+ pub const L = enum(u1) {
+ msr = 0b0,
+ mrs = 0b1,
+ };
+
+ pub const Decoded = union(enum) {
+ msr: Msr,
+ mrs: Mrs,
+ };
+ /// The L bit fully classifies this group; no unallocated encodings.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.L) {
+ .msr => .{ .msr = inst.msr },
+ .mrs => .{ .mrs = inst.mrs },
+ };
+ }
+ };
+
+ /// Unconditional branch (register)
+ pub const UnconditionalBranchRegister = packed union {
+ group: @This().Group,
+ br: Br,
+ blr: Blr,
+ ret: Ret,
+
+ pub const Group = packed struct {
+ op4: u5,
+ Rn: Register.Encoded,
+ op3: u6,
+ op2: u5,
+ opc: u4,
+ decoded25: u7 = 0b1101011,
+ };
+
+ /// C6.2.37 BR
+ pub const Br = packed struct {
+ Rm: Register.Encoded = @enumFromInt(0),
+ Rn: Register.Encoded,
+ // M/A/Z are the pointer-authentication variant bits; the plain forms
+ // modeled here leave them clear.
+ M: bool = false,
+ A: bool = false,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b11111,
+ op: u2 = 0b00,
+ decoded23: u1 = 0b0,
+ Z: bool = false,
+ decoded25: u7 = 0b1101011,
+ };
+
+ /// C6.2.35 BLR
+ pub const Blr = packed struct {
+ Rm: Register.Encoded = @enumFromInt(0),
+ Rn: Register.Encoded,
+ M: bool = false,
+ A: bool = false,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b11111,
+ op: u2 = 0b01,
+ decoded23: u1 = 0b0,
+ Z: bool = false,
+ decoded25: u7 = 0b1101011,
+ };
+
+ /// C6.2.254 RET
+ pub const Ret = packed struct {
+ Rm: Register.Encoded = @enumFromInt(0),
+ Rn: Register.Encoded,
+ M: bool = false,
+ A: bool = false,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b11111,
+ op: u2 = 0b10,
+ decoded23: u1 = 0b0,
+ Z: bool = false,
+ decoded25: u7 = 0b1101011,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ br: Br,
+ blr: Blr,
+ ret: Ret,
+ };
+ /// Recognizes only the plain BR/BLR/RET forms: op2 == 0b11111,
+ /// op4 == 0b00000, opc selecting the instruction. Everything else
+ /// (including pointer-authentication variants) is unallocated here.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op2) {
+ else => .unallocated,
+ 0b11111 => switch (inst.group.opc) {
+ 0b0000 => switch (inst.group.op4) {
+ else => .unallocated,
+ 0b00000 => .{ .br = inst.br },
+ },
+ 0b0001 => switch (inst.group.op4) {
+ else => .unallocated,
+ 0b00000 => .{ .blr = inst.blr },
+ },
+ 0b0010 => switch (inst.group.op4) {
+ else => .unallocated,
+ 0b00000 => .{ .ret = inst.ret },
+ },
+ else => .unallocated,
+ },
+ };
+ }
+ };
+
+ /// Unconditional branch (immediate)
+ pub const UnconditionalBranchImmediate = packed union {
+ group: @This().Group,
+ b: B,
+ bl: Bl,
+
+ pub const Group = packed struct {
+ // PC-relative offset in units of 4-byte instructions.
+ imm26: i26,
+ decoded26: u5 = 0b00101,
+ op: Op,
+ };
+
+ /// C6.2.25 B
+ pub const B = packed struct {
+ imm26: i26,
+ decoded26: u5 = 0b00101,
+ op: Op = .b,
+ };
+
+ /// C6.2.34 BL
+ pub const Bl = packed struct {
+ imm26: i26,
+ decoded26: u5 = 0b00101,
+ op: Op = .bl,
+ };
+
+ // The op bit fully classifies this group, so no decode() is needed.
+ pub const Op = enum(u1) {
+ b = 0b0,
+ bl = 0b1,
+ };
+ };
+
+ /// Compare and branch (immediate)
+ pub const CompareBranchImmediate = packed union {
+ group: @This().Group,
+ cbz: Cbz,
+ cbnz: Cbnz,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ // PC-relative offset in units of 4-byte instructions.
+ imm19: i19,
+ op: Op,
+ decoded25: u6 = 0b011010,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.47 CBZ
+ pub const Cbz = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ op: Op = .cbz,
+ decoded25: u6 = 0b011010,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.46 CBNZ
+ pub const Cbnz = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ op: Op = .cbnz,
+ decoded25: u6 = 0b011010,
+ sf: Register.IntegerSize,
+ };
+
+ // The op bit fully classifies this group, so no decode() is needed.
+ pub const Op = enum(u1) {
+ cbz = 0b0,
+ cbnz = 0b1,
+ };
+ };
+
+ /// Test and branch (immediate)
+ pub const TestBranchImmediate = packed union {
+ group: @This().Group,
+ tbz: Tbz,
+ tbnz: Tbnz,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ // PC-relative offset in units of 4-byte instructions.
+ imm14: i14,
+ // b5:b40 is the tested bit number; b5 doubles as the register size bit.
+ b40: u5,
+ op: Op,
+ decoded25: u6 = 0b011011,
+ b5: u1,
+ };
+
+ /// C6.2.375 TBZ
+ pub const Tbz = packed struct {
+ Rt: Register.Encoded,
+ imm14: i14,
+ b40: u5,
+ op: Op = .tbz,
+ decoded25: u6 = 0b011011,
+ b5: u1,
+ };
+
+ /// C6.2.374 TBNZ
+ pub const Tbnz = packed struct {
+ Rt: Register.Encoded,
+ imm14: i14,
+ b40: u5,
+ op: Op = .tbnz,
+ decoded25: u6 = 0b011011,
+ b5: u1,
+ };
+
+ // The op bit fully classifies this group, so no decode() is needed.
+ pub const Op = enum(u1) {
+ tbz = 0b0,
+ tbnz = 0b1,
+ };
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ conditional_branch_immediate: ConditionalBranchImmediate,
+ exception_generating: ExceptionGenerating,
+ system_register_argument: SystemRegisterArgument,
+ hints: Hints,
+ barriers: Barriers,
+ pstate: Pstate,
+ system_result: SystemResult,
+ system: System,
+ system_register_move: SystemRegisterMove,
+ unconditional_branch_register: UnconditionalBranchRegister,
+ unconditional_branch_immediate: UnconditionalBranchImmediate,
+ compare_branch_immediate: CompareBranchImmediate,
+ test_branch_immediate: TestBranchImmediate,
+ };
+ /// Classifies a branch/exception-generating/system instruction by the
+ /// op0/op1/op2 selector fields (Table C4-88) and reinterprets it as the
+ /// matching subclass.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op0) {
+ 0b010 => switch (inst.group.op1) {
+ // op1<13> == 0 selects conditional branch (immediate).
+ 0b00000000000000...0b01111111111111 => .{ .conditional_branch_immediate = inst.conditional_branch_immediate },
+ else => .unallocated,
+ },
+ 0b110 => switch (inst.group.op1) {
+ 0b00000000000000...0b00111111111111 => .{ .exception_generating = inst.exception_generating },
+ 0b01000000110001 => .{ .system_register_argument = inst.system_register_argument },
+ 0b01000000110010 => switch (inst.group.op2) {
+ 0b11111 => .{ .hints = inst.hints },
+ else => .unallocated,
+ },
+ 0b01000000110011 => .{ .barriers = inst.barriers },
+ 0b01000000000100,
+ 0b01000000010100,
+ 0b01000000100100,
+ 0b01000000110100,
+ 0b01000001000100,
+ 0b01000001010100,
+ 0b01000001100100,
+ 0b01000001110100,
+ => .{ .pstate = inst.pstate },
+ 0b01001000000000...0b01001001111111 => .{ .system_result = inst.system_result },
+ 0b01000010000000...0b01000011111111, 0b01001010000000...0b01001011111111 => .{ .system = inst.system },
+ 0b01000100000000...0b01000111111111, 0b01001100000000...0b01001111111111 => .{ .system_register_move = inst.system_register_move },
+ 0b10000000000000...0b11111111111111 => .{ .unconditional_branch_register = inst.unconditional_branch_register },
+ else => .unallocated,
+ },
+ 0b000, 0b100 => .{ .unconditional_branch_immediate = inst.unconditional_branch_immediate },
+ 0b001, 0b101 => switch (inst.group.op1) {
+ 0b00000000000000...0b01111111111111 => .{ .compare_branch_immediate = inst.compare_branch_immediate },
+ 0b10000000000000...0b11111111111111 => .{ .test_branch_immediate = inst.test_branch_immediate },
+ },
+ else => .unallocated,
+ };
+ }
+ };
+
+ /// C4.1.88 Loads and Stores
+ pub const LoadStore = packed union {
+ group: @This().Group,
+ register_literal: RegisterLiteral,
+ memory: Memory,
+ no_allocate_pair_offset: NoAllocatePairOffset,
+ register_pair_post_indexed: RegisterPairPostIndexed,
+ register_pair_offset: RegisterPairOffset,
+ register_pair_pre_indexed: RegisterPairPreIndexed,
+ register_unscaled_immediate: RegisterUnscaledImmediate,
+ register_immediate_post_indexed: RegisterImmediatePostIndexed,
+ register_unprivileged: RegisterUnprivileged,
+ register_immediate_pre_indexed: RegisterImmediatePreIndexed,
+ register_register_offset: RegisterRegisterOffset,
+ register_unsigned_immediate: RegisterUnsignedImmediate,
+
+ /// Table C4-89 Encoding table for the Loads and Stores group
+ pub const Group = packed struct {
+ // op0..op4 are the selector fields for this group; the encodedN
+ // fields cover bits not needed for classification.
+ encoded0: u10,
+ op4: u2,
+ encoded12: u4,
+ op3: u6,
+ encoded22: u1,
+ op2: u2,
+ decoded25: u1 = 0b0,
+ op1: bool,
+ decoded27: u1 = 0b1,
+ op0: u4,
+ };
+
+ /// Load register (literal)
+ pub const RegisterLiteral = packed union {
+ group: @This().Group,
+ integer: Integer,
+ vector: Vector,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ // PC-relative offset in units of 4-byte words.
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ // V selects integer (false) vs. SIMD&FP (true) destination.
+ V: bool,
+ decoded27: u3 = 0b011,
+ opc: u2,
+ };
+
+ pub const Integer = packed union {
+ group: @This().Group,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b011,
+ opc: u2,
+ };
+
+ /// C6.2.167 LDR (literal)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b011,
+ // opc<0> is the size bit here; opc<1> is fixed to 0.
+ sf: Register.IntegerSize,
+ opc1: u1 = 0b0,
+ };
+
+ /// C6.2.179 LDRSW (literal)
+ pub const Ldrsw = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b011,
+ opc: u2 = 0b10,
+ };
+
+ /// C6.2.248 PRFM (literal)
+ pub const Prfm = packed struct {
+ // The Rt slot carries the prefetch operation instead of a register.
+ prfop: PrfOp,
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b011,
+ opc: u2 = 0b11,
+ };
+ };
+
+ pub const Vector = packed union {
+ group: @This().Group,
+ ldr: Ldr,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b011,
+ opc: VectorSize,
+ };
+
+ /// C7.2.192 LDR (literal, SIMD&FP)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ imm19: i19,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b011,
+ opc: VectorSize,
+ };
+ };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ /// Splits on the V bit: integer vs. SIMD&FP register file.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
+ /// Memory Copy and Memory Set
+ pub const Memory = packed struct {
+ // Only the generic layout is modeled for this class.
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b01,
+ op2: u4,
+ Rs: Register.Encoded,
+ decoded21: u1 = 0b0,
+ op1: u2,
+ decoded24: u2 = 0b01,
+ o0: u1,
+ decoded27: u3 = 0b011,
+ size: IntegerSize,
+ };
+
+ /// Load/store no-allocate pair (offset)
+ pub const NoAllocatePairOffset = packed struct {
+ // Only the generic layout is modeled for this class.
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L,
+ decoded23: u3 = 0b000,
+ V: bool,
+ decoded27: u3 = 0b101,
+ opc: u2,
+ };
+
+ /// Load/store register pair (post-indexed)
+ pub const RegisterPairPostIndexed = packed union {
+ group: @This().Group,
+ integer: Integer,
+ vector: Vector,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ // Scaled offset added to Rn after the access (post-indexed).
+ imm7: i7,
+ L: L,
+ decoded23: u3 = 0b001,
+ // V selects integer (false) vs. SIMD&FP (true) registers.
+ V: bool,
+ decoded27: u3 = 0b101,
+ opc: u2,
+ };
+
+ pub const Integer = packed union {
+ group: @This().Group,
+ stp: Stp,
+ ldp: Ldp,
+ ldpsw: Ldpsw,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L,
+ decoded23: u3 = 0b001,
+ V: bool = false,
+ decoded27: u3 = 0b101,
+ opc: u2,
+ };
+
+ /// C6.2.321 STP
+ pub const Stp = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L = .store,
+ decoded23: u3 = 0b001,
+ V: bool = false,
+ decoded27: u3 = 0b101,
+ // opc<1> is the size bit; opc<0> is fixed to 0.
+ opc0: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.164 LDP
+ pub const Ldp = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L = .load,
+ decoded23: u3 = 0b001,
+ V: bool = false,
+ decoded27: u3 = 0b101,
+ opc0: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.165 LDPSW
+ pub const Ldpsw = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L = .load,
+ decoded23: u3 = 0b001,
+ V: bool = false,
+ decoded27: u3 = 0b101,
+ opc: u2 = 0b01,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ stp: Stp,
+ ldp: Ldp,
+ ldpsw: Ldpsw,
+ };
+ /// opc 0b00/0b10 are 32/64-bit STP/LDP split by L; opc 0b01 with
+ /// L == load is LDPSW; everything else is unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.opc) {
+ 0b00, 0b10 => switch (inst.group.L) {
+ .store => .{ .stp = inst.stp },
+ .load => .{ .ldp = inst.ldp },
+ },
+ 0b01 => switch (inst.group.L) {
+ else => .unallocated,
+ .load => .{ .ldpsw = inst.ldpsw },
+ },
+ else => .unallocated,
+ };
+ }
+ };
+
+ pub const Vector = packed union {
+ group: @This().Group,
+ stp: Stp,
+ ldp: Ldp,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L,
+ decoded23: u3 = 0b001,
+ V: bool = true,
+ decoded27: u3 = 0b101,
+ opc: VectorSize,
+ };
+
+ /// C7.2.330 STP (SIMD&FP)
+ pub const Stp = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L = .store,
+ decoded23: u3 = 0b001,
+ V: bool = true,
+ decoded27: u3 = 0b101,
+ opc: VectorSize,
+ };
+
+ /// C7.2.190 LDP (SIMD&FP)
+ pub const Ldp = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ Rt2: Register.Encoded,
+ imm7: i7,
+ L: L = .load,
+ decoded23: u3 = 0b001,
+ V: bool = true,
+ decoded27: u3 = 0b101,
+ opc: VectorSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ stp: Stp,
+ ldp: Ldp,
+ };
+ /// Valid for the named vector sizes; other opc values are unallocated.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.opc) {
+ .single, .double, .quad => switch (inst.group.L) {
+ .store => .{ .stp = inst.stp },
+ .load => .{ .ldp = inst.ldp },
+ },
+ _ => .unallocated,
+ };
+ }
+ };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ /// Splits on the V bit: integer vs. SIMD&FP register file.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
/// Load/store register pair (offset)
///
/// Identical layout to the post-indexed group except for the fixed bits in
/// `decoded23` (0b010 here). Fields are declared LSB-first (Zig packed
/// layout); `decodedNN` fields pin the fixed opcode bits of this group.
pub const RegisterPairOffset = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Field layout common to all variants in this group (V still open).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        Rt2: Register.Encoded,
        imm7: i7,
        L: L,
        decoded23: u3 = 0b010,
        V: bool,
        decoded27: u3 = 0b101,
        opc: u2,
    };

    /// General-purpose register forms (V == false).
    pub const Integer = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,
        ldpsw: Ldpsw,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2,
        };

        /// C6.2.321 STP
        /// `opc` splits into a fixed low bit and `sf` (32/64-bit select).
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.164 LDP
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.165 LDPSW
        /// Load-only; the full 2-bit `opc` field is fixed to 0b01.
        pub const Ldpsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b010,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2 = 0b01,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
            ldpsw: Ldpsw,
        };
        /// Narrows this word to a specific integer pair instruction,
        /// returning `.unallocated` for reserved opc/L combinations.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                // opc<0> == 0 for STP/LDP; opc<1> is the sf (size) bit.
                0b00, 0b10 => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                // opc == 0b01 is LDPSW, which only exists as a load.
                0b01 => switch (inst.group.L) {
                    else => .unallocated,
                    .load => .{ .ldpsw = inst.ldpsw },
                },
                else => .unallocated,
            };
        }
    };

    /// SIMD&FP register forms (V == true).
    pub const Vector = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b010,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.330 STP (SIMD&FP)
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b010,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.190 LDP (SIMD&FP)
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b010,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
        };
        /// Narrows to STP/LDP (SIMD&FP); any `opc` outside the three
        /// named vector sizes is a reserved encoding.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                .single, .double, .quad => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                _ => .unallocated,
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Dispatches on V between the integer and SIMD&FP sub-groups.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register pair (pre-indexed)
///
/// Identical layout to the offset/post-indexed groups except for the fixed
/// bits in `decoded23` (0b011 here). Fields are declared LSB-first (Zig
/// packed layout); `decodedNN` fields pin the fixed opcode bits.
pub const RegisterPairPreIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Field layout common to all variants in this group (V still open).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        Rt2: Register.Encoded,
        imm7: i7,
        L: L,
        decoded23: u3 = 0b011,
        V: bool,
        decoded27: u3 = 0b101,
        opc: u2,
    };

    /// General-purpose register forms (V == false).
    pub const Integer = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,
        ldpsw: Ldpsw,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2,
        };

        /// C6.2.321 STP
        /// `opc` splits into a fixed low bit and `sf` (32/64-bit select).
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.164 LDP
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc0: u1 = 0b0,
            sf: Register.IntegerSize,
        };

        /// C6.2.165 LDPSW
        /// Load-only; the full 2-bit `opc` field is fixed to 0b01.
        /// Field named `opc` (not `opc0`) to match the offset and
        /// post-indexed LDPSW encodings; `opc0` denotes the 1-bit low
        /// half of opc in the STP/LDP forms above.
        pub const Ldpsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b011,
            V: bool = false,
            decoded27: u3 = 0b101,
            opc: u2 = 0b01,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
            ldpsw: Ldpsw,
        };
        /// Narrows this word to a specific integer pair instruction,
        /// returning `.unallocated` for reserved opc/L combinations.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                // opc<0> == 0 for STP/LDP; opc<1> is the sf (size) bit.
                0b00, 0b10 => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                // opc == 0b01 is LDPSW, which only exists as a load.
                0b01 => switch (inst.group.L) {
                    else => .unallocated,
                    .load => .{ .ldpsw = inst.ldpsw },
                },
                else => .unallocated,
            };
        }
    };

    /// SIMD&FP register forms (V == true).
    pub const Vector = packed union {
        group: @This().Group,
        stp: Stp,
        ldp: Ldp,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L,
            decoded23: u3 = 0b011,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.330 STP (SIMD&FP)
        pub const Stp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .store,
            decoded23: u3 = 0b011,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        /// C7.2.190 LDP (SIMD&FP)
        pub const Ldp = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            Rt2: Register.Encoded,
            imm7: i7,
            L: L = .load,
            decoded23: u3 = 0b011,
            V: bool = true,
            decoded27: u3 = 0b101,
            opc: VectorSize,
        };

        pub const Decoded = union(enum) {
            unallocated,
            stp: Stp,
            ldp: Ldp,
        };
        /// Narrows to STP/LDP (SIMD&FP); any `opc` outside the three
        /// named vector sizes is a reserved encoding.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.opc) {
                .single, .double, .quad => switch (inst.group.L) {
                    .store => .{ .stp = inst.stp },
                    .load => .{ .ldp = inst.ldp },
                },
                _ => .unallocated,
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Dispatches on V between the integer and SIMD&FP sub-groups.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register (unscaled immediate)
///
/// Packed views of one 32-bit instruction word using a 9-bit signed,
/// unscaled byte offset. Fields are declared LSB-first (Zig packed
/// layout); `decodedNN` fields pin the fixed opcode bits of this group.
pub const RegisterUnscaledImmediate = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Field layout common to all variants in this group (V/size open).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b00,
        imm9: i9,
        decoded21: u1 = 0b0,
        opc: u2,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b111,
        size: u2,
    };

    /// General-purpose register forms (V == false).
    pub const Integer = packed union {
        group: @This().Group,
        sturb: Sturb,
        ldurb: Ldurb,
        ldursb: Ldursb,
        sturh: Sturh,
        ldurh: Ldurh,
        ldursh: Ldursh,
        stur: Stur,
        ldur: Ldur,
        ldursw: Ldursw,
        prfum: Prfum,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize,
        };

        /// C6.2.347 STURB
        pub const Sturb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.203 LDURB
        pub const Ldurb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.205 LDURSB
        /// opc is 0b1x: `opc0` selects the destination width variant.
        pub const Ldursb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.348 STURH
        pub const Sturh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.204 LDURH
        pub const Ldurh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.206 LDURSH
        /// opc is 0b1x: `opc0` selects the destination width variant.
        pub const Ldursh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.346 STUR
        /// `size` splits into `sf` (word/doubleword select) and a fixed
        /// high bit, so this one struct covers both 32- and 64-bit forms.
        pub const Stur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.202 LDUR
        pub const Ldur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.207 LDURSW
        pub const Ldursw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .word,
        };

        /// C6.2.250 PRFUM
        /// The prefetch operation occupies the Rt slot.
        pub const Prfum = packed struct {
            prfop: PrfOp,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .doubleword,
        };

        pub const Decoded = union(enum) {
            unallocated,
            sturb: Sturb,
            ldurb: Ldurb,
            ldursb: Ldursb,
            sturh: Sturh,
            ldurh: Ldurh,
            ldursh: Ldursh,
            stur: Stur,
            ldur: Ldur,
            ldursw: Ldursw,
            prfum: Prfum,
        };
        /// Narrows this word to a specific instruction by size/opc.
        /// Re-checks V so a direct call on a vector-encoded word yields
        /// `.unallocated` (the outer decode already guarantees V == false).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size) {
                .byte => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .sturb = inst.sturb },
                        0b01 => .{ .ldurb = inst.ldurb },
                        0b10, 0b11 => .{ .ldursb = inst.ldursb },
                    },
                    true => .unallocated,
                },
                .halfword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .sturh = inst.sturh },
                        0b01 => .{ .ldurh = inst.ldurh },
                        0b10, 0b11 => .{ .ldursh = inst.ldursh },
                    },
                    true => .unallocated,
                },
                .word => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .stur = inst.stur },
                        0b01 => .{ .ldur = inst.ldur },
                        0b10 => .{ .ldursw = inst.ldursw },
                        0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
                .doubleword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .stur = inst.stur },
                        0b01 => .{ .ldur = inst.ldur },
                        0b10 => .{ .prfum = inst.prfum },
                        0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
            };
        }
    };

    /// SIMD&FP register forms (V == true). The access size is spread
    /// across `size` (2 bits) and `opc1` (1 bit), handled by the helpers.
    pub const Vector = packed union {
        group: @This().Group,
        stur: Stur,
        ldur: Ldur,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.333 STUR (SIMD&FP)
        pub const Stur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .store,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.194 LDUR (SIMD&FP)
        pub const Ldur = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b00,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .load,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// High bit of the vector element size: set only for quad.
        pub const Opc1 = packed struct {
            encoded: u1,

            pub fn encode(vs: Register.VectorSize) Opc1 {
                return .{ .encoded = switch (vs) {
                    .byte, .half, .single, .double => 0b0,
                    .quad => 0b1,
                    else => unreachable,
                } };
            }

            /// Recovers the vector size from the opc1/size pair.
            /// `unreachable` arms are encodings rejected by
            /// `Vector.decode` before this is called.
            pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
                return switch (enc_size.encoded) {
                    0b00 => switch (enc_opc1.encoded) {
                        0b0 => .byte,
                        0b1 => .quad,
                    },
                    0b01 => switch (enc_opc1.encoded) {
                        0b0 => .half,
                        0b1 => unreachable,
                    },
                    0b10 => switch (enc_opc1.encoded) {
                        0b0 => .single,
                        0b1 => unreachable,
                    },
                    0b11 => switch (enc_opc1.encoded) {
                        0b0 => .double,
                        0b1 => unreachable,
                    },
                };
            }
        };

        /// Low two bits of the vector element size (quad aliases byte
        /// here and is disambiguated by `Opc1`).
        pub const Size = packed struct {
            encoded: u2,

            pub fn encode(vs: Register.VectorSize) Size {
                return .{ .encoded = switch (vs) {
                    .byte, .quad => 0b00,
                    .half => 0b01,
                    .single => 0b10,
                    .double => 0b11,
                    else => unreachable,
                } };
            }
        };

        pub const Decoded = union(enum) {
            unallocated,
            stur: Stur,
            ldur: Ldur,
        };
        /// Narrows to STUR/LDUR (SIMD&FP); opc1 == 1 is only valid
        /// together with size == 0b00 (the quad encoding).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size.encoded) {
                0b00 => switch (inst.group.opc0) {
                    .store => .{ .stur = inst.stur },
                    .load => .{ .ldur = inst.ldur },
                },
                0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
                    0b0 => switch (inst.group.opc0) {
                        .store => .{ .stur = inst.stur },
                        .load => .{ .ldur = inst.ldur },
                    },
                    0b1 => .unallocated,
                },
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Dispatches on V between the integer and SIMD&FP sub-groups.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register (immediate post-indexed)
///
/// Same field layout as the unscaled-immediate group except `decoded10`
/// is 0b01. Fields are declared LSB-first (Zig packed layout);
/// `decodedNN` fields pin the fixed opcode bits of this group.
pub const RegisterImmediatePostIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Field layout common to all variants in this group (V/size open).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b01,
        imm9: i9,
        decoded21: u1 = 0b0,
        opc: u2,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b111,
        size: u2,
    };

    /// General-purpose register forms (V == false).
    pub const Integer = packed union {
        group: @This().Group,
        strb: Strb,
        ldrb: Ldrb,
        ldrsb: Ldrsb,
        strh: Strh,
        ldrh: Ldrh,
        ldrsh: Ldrsh,
        str: Str,
        ldr: Ldr,
        ldrsw: Ldrsw,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize,
        };

        /// C6.2.324 STRB (immediate)
        pub const Strb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.170 LDRB (immediate)
        pub const Ldrb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.174 LDRSB (immediate)
        /// opc is 0b1x: `opc0` selects the destination width variant.
        pub const Ldrsb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.326 STRH (immediate)
        pub const Strh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.172 LDRH (immediate)
        pub const Ldrh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.176 LDRSH (immediate)
        /// opc is 0b1x: `opc0` selects the destination width variant.
        pub const Ldrsh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.322 STR (immediate)
        /// `size` splits into `sf` (word/doubleword select) and a fixed
        /// high bit, so this one struct covers both 32- and 64-bit forms.
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.166 LDR (immediate)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.178 LDRSW (immediate)
        pub const Ldrsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .word,
        };

        pub const Decoded = union(enum) {
            unallocated,
            strb: Strb,
            ldrb: Ldrb,
            ldrsb: Ldrsb,
            strh: Strh,
            ldrh: Ldrh,
            ldrsh: Ldrsh,
            str: Str,
            ldr: Ldr,
            ldrsw: Ldrsw,
        };
        /// Narrows this word to a specific instruction by size/opc.
        /// Re-checks V so a direct call on a vector-encoded word yields
        /// `.unallocated` (the outer decode already guarantees V == false).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size) {
                .byte => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .strb = inst.strb },
                        0b01 => .{ .ldrb = inst.ldrb },
                        0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
                    },
                    true => .unallocated,
                },
                .halfword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .strh = inst.strh },
                        0b01 => .{ .ldrh = inst.ldrh },
                        0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
                    },
                    true => .unallocated,
                },
                .word => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .str = inst.str },
                        0b01 => .{ .ldr = inst.ldr },
                        0b10 => .{ .ldrsw = inst.ldrsw },
                        0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
                .doubleword => switch (inst.group.V) {
                    false => switch (inst.group.opc) {
                        0b00 => .{ .str = inst.str },
                        0b01 => .{ .ldr = inst.ldr },
                        // no post-indexed prefetch form; 0b10 is reserved
                        // here unlike the unscaled-immediate group (PRFUM).
                        0b10, 0b11 => .unallocated,
                    },
                    true => .unallocated,
                },
            };
        }
    };

    /// SIMD&FP register forms (V == true). The access size is spread
    /// across `size` (2 bits) and `opc1` (1 bit), handled by the helpers.
    pub const Vector = packed union {
        group: @This().Group,
        str: Str,
        ldr: Ldr,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.331 STR (immediate, SIMD&FP)
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .store,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.191 LDR (immediate, SIMD&FP)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b01,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .load,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// High bit of the vector element size: set only for quad.
        pub const Opc1 = packed struct {
            encoded: u1,

            pub fn encode(vs: Register.VectorSize) Opc1 {
                return .{ .encoded = switch (vs) {
                    .byte, .half, .single, .double => 0b0,
                    .quad => 0b1,
                    else => unreachable,
                } };
            }

            /// Recovers the vector size from the opc1/size pair.
            /// `unreachable` arms are encodings rejected by
            /// `Vector.decode` before this is called.
            pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
                return switch (enc_size.encoded) {
                    0b00 => switch (enc_opc1.encoded) {
                        0b0 => .byte,
                        0b1 => .quad,
                    },
                    0b01 => switch (enc_opc1.encoded) {
                        0b0 => .half,
                        0b1 => unreachable,
                    },
                    0b10 => switch (enc_opc1.encoded) {
                        0b0 => .single,
                        0b1 => unreachable,
                    },
                    0b11 => switch (enc_opc1.encoded) {
                        0b0 => .double,
                        0b1 => unreachable,
                    },
                };
            }
        };

        /// Low two bits of the vector element size (quad aliases byte
        /// here and is disambiguated by `Opc1`).
        pub const Size = packed struct {
            encoded: u2,

            pub fn encode(vs: Register.VectorSize) Size {
                return .{ .encoded = switch (vs) {
                    .byte, .quad => 0b00,
                    .half => 0b01,
                    .single => 0b10,
                    .double => 0b11,
                    else => unreachable,
                } };
            }
        };

        pub const Decoded = union(enum) {
            unallocated,
            str: Str,
            ldr: Ldr,
        };
        /// Narrows to STR/LDR (SIMD&FP); opc1 == 1 is only valid
        /// together with size == 0b00 (the quad encoding).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size.encoded) {
                0b00 => switch (inst.group.opc0) {
                    .store => .{ .str = inst.str },
                    .load => .{ .ldr = inst.ldr },
                },
                0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
                    0b0 => switch (inst.group.opc0) {
                        .store => .{ .str = inst.str },
                        .load => .{ .ldr = inst.ldr },
                    },
                    0b1 => .unallocated,
                },
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Dispatches on V between the integer and SIMD&FP sub-groups.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
/// Load/store register (unprivileged)
///
/// Packed view of one 32-bit instruction word (`decoded10` == 0b10
/// distinguishes this group from the other imm9 load/store groups).
/// Unlike its siblings, this group carries no nested variant unions or
/// `decode` helper; `opc`, `V` and `size` are left open for the caller.
/// Fields are declared LSB-first (Zig packed layout).
pub const RegisterUnprivileged = packed struct {
    /// Transfer register.
    Rt: Register.Encoded,
    /// Base register.
    Rn: Register.Encoded,
    decoded10: u2 = 0b10,
    /// Signed, unscaled byte offset.
    imm9: i9,
    decoded21: u1 = 0b0,
    opc: u2,
    decoded24: u2 = 0b00,
    V: bool,
    decoded27: u3 = 0b111,
    size: IntegerSize,
};
+
/// Load/store register (immediate pre-indexed)
///
/// Same field layout as the post-indexed group except `decoded10` is
/// 0b11. Fields are declared LSB-first (Zig packed layout); `decodedNN`
/// fields pin the fixed opcode bits of this group.
pub const RegisterImmediatePreIndexed = packed union {
    group: @This().Group,
    integer: Integer,
    vector: Vector,

    /// Field layout common to all variants in this group (V/size open).
    pub const Group = packed struct {
        Rt: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b11,
        imm9: i9,
        decoded21: u1 = 0b0,
        opc: u2,
        decoded24: u2 = 0b00,
        V: bool,
        decoded27: u3 = 0b111,
        size: u2,
    };

    /// General-purpose register forms (V == false).
    pub const Integer = packed union {
        group: @This().Group,
        strb: Strb,
        ldrb: Ldrb,
        ldrsb: Ldrsb,
        strh: Strh,
        ldrh: Ldrh,
        ldrsh: Ldrsh,
        str: Str,
        ldr: Ldr,
        ldrsw: Ldrsw,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize,
        };

        /// C6.2.324 STRB (immediate)
        pub const Strb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.170 LDRB (immediate)
        pub const Ldrb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.174 LDRSB (immediate)
        /// opc is 0b1x: `opc0` selects the destination width variant.
        pub const Ldrsb = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .byte,
        };

        /// C6.2.326 STRH (immediate)
        pub const Strh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.172 LDRH (immediate)
        pub const Ldrh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.176 LDRSH (immediate)
        /// opc is 0b1x: `opc0` selects the destination width variant.
        pub const Ldrsh = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: u1,
            opc1: u1 = 0b1,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .halfword,
        };

        /// C6.2.322 STR (immediate)
        /// `size` splits into `sf` (word/doubleword select) and a fixed
        /// high bit, so this one struct covers both 32- and 64-bit forms.
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b00,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.166 LDR (immediate)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b01,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            sf: Register.IntegerSize,
            size1: u1 = 0b1,
        };

        /// C6.2.178 LDRSW (immediate)
        pub const Ldrsw = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc: u2 = 0b10,
            decoded24: u2 = 0b00,
            V: bool = false,
            decoded27: u3 = 0b111,
            size: IntegerSize = .word,
        };

        pub const Decoded = union(enum) {
            unallocated,
            strb: Strb,
            ldrb: Ldrb,
            ldrsb: Ldrsb,
            strh: Strh,
            ldrh: Ldrh,
            ldrsh: Ldrsh,
            str: Str,
            ldr: Ldr,
            ldrsw: Ldrsw,
        };
        /// Narrows this word to a specific instruction by size/opc.
        /// NOTE(review): unlike the unscaled/post-indexed groups, this
        /// decode does not re-check V; the outer decode only reaches
        /// `integer` when V == false, so behavior is equivalent there.
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size) {
                .byte => switch (inst.group.opc) {
                    0b00 => .{ .strb = inst.strb },
                    0b01 => .{ .ldrb = inst.ldrb },
                    0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
                },
                .halfword => switch (inst.group.opc) {
                    0b00 => .{ .strh = inst.strh },
                    0b01 => .{ .ldrh = inst.ldrh },
                    0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
                },
                .word => switch (inst.group.opc) {
                    0b00 => .{ .str = inst.str },
                    0b01 => .{ .ldr = inst.ldr },
                    0b10 => .{ .ldrsw = inst.ldrsw },
                    0b11 => .unallocated,
                },
                .doubleword => switch (inst.group.opc) {
                    0b00 => .{ .str = inst.str },
                    0b01 => .{ .ldr = inst.ldr },
                    0b10, 0b11 => .unallocated,
                },
            };
        }
    };

    /// SIMD&FP register forms (V == true). The access size is spread
    /// across `size` (2 bits) and `opc1` (1 bit), handled by the helpers.
    pub const Vector = packed union {
        group: @This().Group,
        str: Str,
        ldr: Ldr,

        pub const Group = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.331 STR (immediate, SIMD&FP)
        pub const Str = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .store,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// C7.2.191 LDR (immediate, SIMD&FP)
        pub const Ldr = packed struct {
            Rt: Register.Encoded,
            Rn: Register.Encoded,
            decoded10: u2 = 0b11,
            imm9: i9,
            decoded21: u1 = 0b0,
            opc0: L = .load,
            opc1: Opc1,
            decoded24: u2 = 0b00,
            V: bool = true,
            decoded27: u3 = 0b111,
            size: Size,
        };

        /// High bit of the vector element size: set only for quad.
        pub const Opc1 = packed struct {
            encoded: u1,

            pub fn encode(vs: Register.VectorSize) Opc1 {
                return .{ .encoded = switch (vs) {
                    .byte, .half, .single, .double => 0b0,
                    .quad => 0b1,
                    else => unreachable,
                } };
            }

            /// Recovers the vector size from the opc1/size pair.
            /// `unreachable` arms are encodings rejected by
            /// `Vector.decode` before this is called.
            pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
                return switch (enc_size.encoded) {
                    0b00 => switch (enc_opc1.encoded) {
                        0b0 => .byte,
                        0b1 => .quad,
                    },
                    0b01 => switch (enc_opc1.encoded) {
                        0b0 => .half,
                        0b1 => unreachable,
                    },
                    0b10 => switch (enc_opc1.encoded) {
                        0b0 => .single,
                        0b1 => unreachable,
                    },
                    0b11 => switch (enc_opc1.encoded) {
                        0b0 => .double,
                        0b1 => unreachable,
                    },
                };
            }
        };

        /// Low two bits of the vector element size (quad aliases byte
        /// here and is disambiguated by `Opc1`).
        pub const Size = packed struct {
            encoded: u2,

            pub fn encode(vs: Register.VectorSize) Size {
                return .{ .encoded = switch (vs) {
                    .byte, .quad => 0b00,
                    .half => 0b01,
                    .single => 0b10,
                    .double => 0b11,
                    else => unreachable,
                } };
            }
        };

        pub const Decoded = union(enum) {
            unallocated,
            str: Str,
            ldr: Ldr,
        };
        /// Narrows to STR/LDR (SIMD&FP); opc1 == 1 is only valid
        /// together with size == 0b00 (the quad encoding).
        pub fn decode(inst: @This()) @This().Decoded {
            return switch (inst.group.size.encoded) {
                0b00 => switch (inst.group.opc0) {
                    .store => .{ .str = inst.str },
                    .load => .{ .ldr = inst.ldr },
                },
                0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
                    0b0 => switch (inst.group.opc0) {
                        .store => .{ .str = inst.str },
                        .load => .{ .ldr = inst.ldr },
                    },
                    0b1 => .unallocated,
                },
            };
        }
    };

    pub const Decoded = union(enum) {
        integer: Integer,
        vector: Vector,
    };
    /// Dispatches on V between the integer and SIMD&FP sub-groups.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.V) {
            false => .{ .integer = inst.integer },
            true => .{ .vector = inst.vector },
        };
    }
};
+
+ /// Load/store register (register offset)
+ pub const RegisterRegisterOffset = packed union {
+ group: @This().Group,
+ integer: Integer,
+ vector: Vector,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2,
+ decoded24: u2 = 0b00,
+ V: bool,
+ decoded27: u3 = 0b111,
+ size: u2,
+ };
+
+ pub const Integer = packed union {
+ group: @This().Group,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize,
+ };
+
+ /// C6.2.325 STRB (register)
+ pub const Strb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.171 LDRB (register)
+ pub const Ldrb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.175 LDRSB (register)
+ pub const Ldrsb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.327 STRH (register)
+ pub const Strh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.173 LDRH (register)
+ pub const Ldrh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.177 LDRSH (register)
+ pub const Ldrsh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.323 STR (register)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.168 LDR (register)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.180 LDRSW (register)
+ pub const Ldrsw = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .word,
+ };
+
+ /// C6.2.249 PRFM (register)
+ pub const Prfm = packed struct {
+ prfop: PrfOp,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b00,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .doubleword,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.size) {
+ .byte => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strb = inst.strb },
+ 0b01 => .{ .ldrb = inst.ldrb },
+ 0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
+ },
+ true => .unallocated,
+ },
+ .halfword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strh = inst.strh },
+ 0b01 => .{ .ldrh = inst.ldrh },
+ 0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
+ },
+ true => .unallocated,
+ },
+ .word => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .ldrsw = inst.ldrsw },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ .doubleword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .prfm = inst.prfm },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ };
+ }
+ };
+
+ pub const Vector = packed union {
+ group: @This().Group,
+ str: Str,
+ ldr: Ldr,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc: u2,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.332 STR (register, SIMD&FP)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: L = .store,
+ opc1: Opc1,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.193 LDR (register, SIMD&FP)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ S: bool,
+ option: Option,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ opc0: L = .load,
+ opc1: Opc1,
+ decoded24: u2 = 0b00,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ pub const Opc1 = packed struct {
+ encoded: u1,
+
+ pub fn encode(vs: Register.VectorSize) Opc1 {
+ return .{ .encoded = switch (vs) {
+ .byte, .half, .single, .double => 0b0,
+ .quad => 0b1,
+ else => unreachable,
+ } };
+ }
+
+ pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
+ return switch (enc_size.encoded) {
+ 0b00 => switch (enc_opc1.encoded) {
+ 0b0 => .byte,
+ 0b1 => .quad,
+ },
+ 0b01 => switch (enc_opc1.encoded) {
+ 0b0 => .half,
+ 0b1 => unreachable,
+ },
+ 0b10 => switch (enc_opc1.encoded) {
+ 0b0 => .single,
+ 0b1 => unreachable,
+ },
+ 0b11 => switch (enc_opc1.encoded) {
+ 0b0 => .double,
+ 0b1 => unreachable,
+ },
+ };
+ }
+ };
+
+ pub const Size = packed struct {
+ encoded: u2,
+
+ pub fn encode(vs: Register.VectorSize) Size {
+ return .{ .encoded = switch (vs) {
+ .byte, .quad => 0b00,
+ .half => 0b01,
+ .single => 0b10,
+ .double => 0b11,
+ else => unreachable,
+ } };
+ }
+ };
+ };
+
+ pub const Option = enum(u3) {
+ uxtw = 0b010,
+ lsl = 0b011,
+ sxtw = 0b110,
+ sxtx = 0b111,
+ _,
+
+ pub fn sf(option: Option) Register.IntegerSize {
+ return switch (option) {
+ .uxtw, .sxtw => .word,
+ .lsl, .sxtx => .doubleword,
+ _ => unreachable,
+ };
+ }
+ };
+
+ pub const Extend = union(Option) {
+ uxtw: Amount,
+ lsl: Amount,
+ sxtw: Amount,
+ sxtx: Amount,
+
+ pub const Amount = u3;
+ };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
+ /// Load/store register (unsigned immediate)
+ pub const RegisterUnsignedImmediate = packed union {
+ group: @This().Group,
+ integer: Integer,
+ vector: Vector,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2,
+ decoded24: u2 = 0b01,
+ V: bool,
+ decoded27: u3 = 0b111,
+ size: u2,
+ };
+
+ pub const Integer = packed union {
+ group: @This().Group,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize,
+ };
+
+ /// C6.2.324 STRB (immediate)
+ pub const Strb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.170 LDRB (immediate)
+ pub const Ldrb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.174 LDRSB (immediate)
+ pub const Ldrsb = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .byte,
+ };
+
+ /// C6.2.326 STRH (immediate)
+ pub const Strh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.172 LDRH (immediate)
+ pub const Ldrh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.176 LDRSH (immediate)
+ pub const Ldrsh = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: u1,
+ opc1: u1 = 0b1,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .halfword,
+ };
+
+ /// C6.2.322 STR (immediate)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b00,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.166 LDR (immediate)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b01,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ sf: Register.IntegerSize,
+ size1: u1 = 0b1,
+ };
+
+ /// C6.2.178 LDRSW (immediate)
+ pub const Ldrsw = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .word,
+ };
+
+ /// C6.2.247 PRFM (immediate)
+ pub const Prfm = packed struct {
+ prfop: PrfOp,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc: u2 = 0b10,
+ decoded24: u2 = 0b01,
+ V: bool = false,
+ decoded27: u3 = 0b111,
+ size: IntegerSize = .doubleword,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ strb: Strb,
+ ldrb: Ldrb,
+ ldrsb: Ldrsb,
+ strh: Strh,
+ ldrh: Ldrh,
+ ldrsh: Ldrsh,
+ str: Str,
+ ldr: Ldr,
+ ldrsw: Ldrsw,
+ prfm: Prfm,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.size) {
+ .byte => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strb = inst.strb },
+ 0b01 => .{ .ldrb = inst.ldrb },
+ 0b10, 0b11 => .{ .ldrsb = inst.ldrsb },
+ },
+ true => .unallocated,
+ },
+ .halfword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .strh = inst.strh },
+ 0b01 => .{ .ldrh = inst.ldrh },
+ 0b10, 0b11 => .{ .ldrsh = inst.ldrsh },
+ },
+ true => .unallocated,
+ },
+ .word => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .ldrsw = inst.ldrsw },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ .doubleword => switch (inst.group.V) {
+ false => switch (inst.group.opc) {
+ 0b00 => .{ .str = inst.str },
+ 0b01 => .{ .ldr = inst.ldr },
+ 0b10 => .{ .prfm = inst.prfm },
+ 0b11 => .unallocated,
+ },
+ true => .unallocated,
+ },
+ };
+ }
+ };
+
+ pub const Vector = packed union {
+ group: @This().Group,
+ str: Str,
+ ldr: Ldr,
+
+ pub const Group = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: L,
+ opc1: Opc1,
+ decoded24: u2 = 0b01,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.331 STR (immediate, SIMD&FP)
+ pub const Str = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: L = .store,
+ opc1: Opc1,
+ decoded24: u2 = 0b01,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ /// C7.2.191 LDR (immediate, SIMD&FP)
+ pub const Ldr = packed struct {
+ Rt: Register.Encoded,
+ Rn: Register.Encoded,
+ imm12: u12,
+ opc0: L = .load,
+ opc1: Opc1,
+ decoded24: u2 = 0b01,
+ V: bool = true,
+ decoded27: u3 = 0b111,
+ size: Size,
+ };
+
+ pub const Opc1 = packed struct {
+ encoded: u1,
+
+ pub fn encode(vs: Register.VectorSize) Opc1 {
+ return .{ .encoded = switch (vs) {
+ .byte, .half, .single, .double => 0b0,
+ .quad => 0b1,
+ else => unreachable,
+ } };
+ }
+
+ pub fn decode(enc_opc1: Opc1, enc_size: Size) Register.VectorSize {
+ return switch (enc_size.encoded) {
+ 0b00 => switch (enc_opc1.encoded) {
+ 0b0 => .byte,
+ 0b1 => .quad,
+ },
+ 0b01 => switch (enc_opc1.encoded) {
+ 0b0 => .half,
+ 0b1 => unreachable,
+ },
+ 0b10 => switch (enc_opc1.encoded) {
+ 0b0 => .single,
+ 0b1 => unreachable,
+ },
+ 0b11 => switch (enc_opc1.encoded) {
+ 0b0 => .double,
+ 0b1 => unreachable,
+ },
+ };
+ }
+ };
+
+ pub const Size = packed struct {
+ encoded: u2,
+
+ pub fn encode(vs: Register.VectorSize) Size {
+ return .{ .encoded = switch (vs) {
+ .byte, .quad => 0b00,
+ .half => 0b01,
+ .single => 0b10,
+ .double => 0b11,
+ else => unreachable,
+ } };
+ }
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ str: Str,
+ ldr: Ldr,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.size.encoded) {
+ 0b00 => switch (inst.group.opc0) {
+ .store => .{ .str = inst.str },
+ .load => .{ .ldr = inst.ldr },
+ },
+ 0b01, 0b10, 0b11 => switch (inst.group.opc1.encoded) {
+ 0b0 => switch (inst.group.opc0) {
+ .store => .{ .str = inst.str },
+ .load => .{ .ldr = inst.ldr },
+ },
+ 0b1 => .unallocated,
+ },
+ };
+ }
+ };
+
+ pub const Decoded = union(enum) {
+ integer: Integer,
+ vector: Vector,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.V) {
+ false => .{ .integer = inst.integer },
+ true => .{ .vector = inst.vector },
+ };
+ }
+ };
+
+ pub const L = enum(u1) {
+ store = 0b0,
+ load = 0b1,
+ };
+
+ pub const IntegerSize = enum(u2) {
+ byte = 0b00,
+ halfword = 0b01,
+ word = 0b10,
+ doubleword = 0b11,
+ };
+
+ pub const VectorSize = enum(u2) {
+ single = 0b00,
+ double = 0b01,
+ quad = 0b10,
+ _,
+
+ pub fn decode(vs: VectorSize) Register.VectorSize {
+ return switch (vs) {
+ .single => .single,
+ .double => .double,
+ .quad => .quad,
+ _ => unreachable,
+ };
+ }
+
+ pub fn encode(vs: Register.VectorSize) VectorSize {
+ return switch (vs) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .quad => .quad,
+ };
+ }
+ };
+
+ pub const PrfOp = packed struct {
+ policy: Policy,
+ target: Target,
+ type: Type,
+
+ pub const Policy = enum(u1) {
+ keep = 0b0,
+ strm = 0b1,
+ };
+
+ pub const Target = enum(u2) {
+ l1 = 0b00,
+ l2 = 0b01,
+ l3 = 0b10,
+ _,
+ };
+
+ pub const Type = enum(u2) {
+ pld = 0b00,
+ pli = 0b01,
+ pst = 0b10,
+ _,
+ };
+
+ pub const pldl1keep: PrfOp = .{ .type = .pld, .target = .l1, .policy = .keep };
+ pub const pldl1strm: PrfOp = .{ .type = .pld, .target = .l1, .policy = .strm };
+ pub const pldl2keep: PrfOp = .{ .type = .pld, .target = .l2, .policy = .keep };
+ pub const pldl2strm: PrfOp = .{ .type = .pld, .target = .l2, .policy = .strm };
+ pub const pldl3keep: PrfOp = .{ .type = .pld, .target = .l3, .policy = .keep };
+ pub const pldl3strm: PrfOp = .{ .type = .pld, .target = .l3, .policy = .strm };
+ pub const plil1keep: PrfOp = .{ .type = .pli, .target = .l1, .policy = .keep };
+ pub const plil1strm: PrfOp = .{ .type = .pli, .target = .l1, .policy = .strm };
+ pub const plil2keep: PrfOp = .{ .type = .pli, .target = .l2, .policy = .keep };
+ pub const plil2strm: PrfOp = .{ .type = .pli, .target = .l2, .policy = .strm };
+ pub const plil3keep: PrfOp = .{ .type = .pli, .target = .l3, .policy = .keep };
+ pub const plil3strm: PrfOp = .{ .type = .pli, .target = .l3, .policy = .strm };
+ pub const pstl1keep: PrfOp = .{ .type = .pst, .target = .l1, .policy = .keep };
+ pub const pstl1strm: PrfOp = .{ .type = .pst, .target = .l1, .policy = .strm };
+ pub const pstl2keep: PrfOp = .{ .type = .pst, .target = .l2, .policy = .keep };
+ pub const pstl2strm: PrfOp = .{ .type = .pst, .target = .l2, .policy = .strm };
+ pub const pstl3keep: PrfOp = .{ .type = .pst, .target = .l3, .policy = .keep };
+ pub const pstl3strm: PrfOp = .{ .type = .pst, .target = .l3, .policy = .strm };
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ register_literal: RegisterLiteral,
+ memory: Memory,
+ no_allocate_pair_offset: NoAllocatePairOffset,
+ register_pair_post_indexed: RegisterPairPostIndexed,
+ register_pair_offset: RegisterPairOffset,
+ register_pair_pre_indexed: RegisterPairPreIndexed,
+ register_unscaled_immediate: RegisterUnscaledImmediate,
+ register_immediate_post_indexed: RegisterImmediatePostIndexed,
+ register_unprivileged: RegisterUnprivileged,
+ register_immediate_pre_indexed: RegisterImmediatePreIndexed,
+ register_register_offset: RegisterRegisterOffset,
+ register_unsigned_immediate: RegisterUnsignedImmediate,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op0) {
+ else => .unallocated,
+ 0b0010, 0b0110, 0b1010, 0b1110 => switch (inst.group.op2) {
+ 0b00 => .{ .no_allocate_pair_offset = inst.no_allocate_pair_offset },
+ 0b01 => .{ .register_pair_post_indexed = inst.register_pair_post_indexed },
+ 0b10 => .{ .register_pair_offset = inst.register_pair_offset },
+ 0b11 => .{ .register_pair_pre_indexed = inst.register_pair_pre_indexed },
+ },
+ 0b0011, 0b0111, 0b1011, 0b1111 => switch (inst.group.op2) {
+ 0b00...0b01 => switch (inst.group.op3) {
+ 0b000000...0b011111 => switch (inst.group.op4) {
+ 0b00 => .{ .register_unscaled_immediate = inst.register_unscaled_immediate },
+ 0b01 => .{ .register_immediate_post_indexed = inst.register_immediate_post_indexed },
+ 0b10 => .{ .register_unprivileged = inst.register_unprivileged },
+ 0b11 => .{ .register_immediate_pre_indexed = inst.register_immediate_pre_indexed },
+ },
+ 0b100000...0b111111 => switch (inst.group.op4) {
+ 0b00 => .unallocated,
+ 0b10 => .{ .register_register_offset = inst.register_register_offset },
+ 0b01, 0b11 => .unallocated,
+ },
+ },
+ 0b10...0b11 => .{ .register_unsigned_immediate = inst.register_unsigned_immediate },
+ },
+ };
+ }
+ };
+
+ /// C4.1.89 Data Processing -- Register
+ pub const DataProcessingRegister = packed union {
+ group: @This().Group,
+ data_processing_two_source: DataProcessingTwoSource,
+ data_processing_one_source: DataProcessingOneSource,
+ logical_shifted_register: LogicalShiftedRegister,
+ add_subtract_shifted_register: AddSubtractShiftedRegister,
+ add_subtract_extended_register: AddSubtractExtendedRegister,
+ add_subtract_with_carry: AddSubtractWithCarry,
+ rotate_right_into_flags: RotateRightIntoFlags,
+ evaluate_into_flags: EvaluateIntoFlags,
+ conditional_compare_register: ConditionalCompareRegister,
+ conditional_compare_immediate: ConditionalCompareImmediate,
+ conditional_select: ConditionalSelect,
+ data_processing_three_source: DataProcessingThreeSource,
+
+ /// Table C4-90 Encoding table for the Data Processing -- Register group
+ pub const Group = packed struct {
+ encoded0: u10,
+ op3: u6,
+ encoded16: u5,
+ op2: u4,
+ decoded25: u3 = 0b101,
+ op1: u1,
+ encoded29: u1,
+ op0: u1,
+ encoded31: u1,
+ };
+
+ /// Data-processing (2 source)
+ pub const DataProcessingTwoSource = packed union {
+ group: @This().Group,
+ udiv: Udiv,
+ sdiv: Sdiv,
+ lslv: Lslv,
+ lsrv: Lsrv,
+ asrv: Asrv,
+ rorv: Rorv,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opcode: u6,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.388 UDIV
+ pub const Udiv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ o1: DivOp = .udiv,
+ decoded11: u5 = 0b00001,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.270 SDIV
+ pub const Sdiv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ o1: DivOp = .sdiv,
+ decoded11: u5 = 0b00001,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.214 LSLV
+ pub const Lslv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .lslv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.217 LSRV
+ pub const Lsrv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .lsrv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.18 ASRV
+ pub const Asrv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .asrv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.263 RORV
+ pub const Rorv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op2: ShiftOp = .rorv,
+ decoded12: u4 = 0b0010,
+ Rm: Register.Encoded,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ sf: Register.IntegerSize,
+ };
+
+ pub const DivOp = enum(u1) {
+ udiv = 0b0,
+ sdiv = 0b1,
+ };
+
+ pub const ShiftOp = enum(u2) {
+ lslv = 0b00,
+ lsrv = 0b01,
+ asrv = 0b10,
+ rorv = 0b11,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ udiv: Udiv,
+ sdiv: Sdiv,
+ lslv: Lslv,
+ lsrv: Lsrv,
+ asrv: Asrv,
+ rorv: Rorv,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.S) {
+ false => switch (inst.group.opcode) {
+ else => .unallocated,
+ 0b000010 => .{ .udiv = inst.udiv },
+ 0b000011 => .{ .sdiv = inst.sdiv },
+ 0b001000 => .{ .lslv = inst.lslv },
+ 0b001001 => .{ .lsrv = inst.lsrv },
+ 0b001010 => .{ .asrv = inst.asrv },
+ 0b001011 => .{ .rorv = inst.rorv },
+ },
+ true => .unallocated,
+ };
+ }
+ };
+
+ /// Data-processing (1 source)
+ pub const DataProcessingOneSource = packed union {
+ group: @This().Group,
+ rbit: Rbit,
+ rev16: Rev16,
+ rev32: Rev32,
+ rev: Rev,
+ clz: Clz,
+ cls: Cls,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opcode: u6,
+ opcode2: u5,
+ decoded21: u8 = 0b11010110,
+ S: bool,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.253 RBIT
+ pub const Rbit = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b00,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.257 REV16
+ pub const Rev16 = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opc: u2 = 0b01,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.258 REV32
+ pub const Rev32 = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opc: u2 = 0b10,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize = .doubleword,
+ };
+
+ /// C6.2.256 REV
+ pub const Rev = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ opc0: Register.IntegerSize,
+ opc1: u1 = 0b1,
+ decoded12: u4 = 0b0000,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.58 CLZ
+ pub const Clz = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op: u1 = 0b0,
+ decoded11: u5 = 0b00010,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.57 CLS
+ pub const Cls = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ op: u1 = 0b1,
+ decoded11: u5 = 0b00010,
+ decoded16: u5 = 0b00000,
+ decoded21: u8 = 0b11010110,
+ S: bool = false,
+ decoded30: u1 = 0b1,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ rbit: Rbit,
+ rev16: Rev16,
+ rev32: Rev32,
+ rev: Rev,
+ clz: Clz,
+ cls: Cls,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.S) {
+ true => .unallocated,
+ false => switch (inst.group.opcode2) {
+ else => .unallocated,
+ 0b00000 => switch (inst.group.opcode) {
+ else => .unallocated,
+ 0b000000 => .{ .rbit = inst.rbit },
+ 0b000001 => .{ .rev16 = inst.rev16 },
+ 0b000010 => switch (inst.group.sf) {
+ .word => .{ .rev = inst.rev },
+ .doubleword => .{ .rev32 = inst.rev32 },
+ },
+ 0b000011 => switch (inst.group.sf) {
+ .word => .unallocated,
+ .doubleword => .{ .rev = inst.rev },
+ },
+ 0b000100 => .{ .clz = inst.clz },
+ 0b000101 => .{ .cls = inst.cls },
+ },
+ },
+ };
+ }
+ };
+
+ /// Logical (shifted register)
+ pub const LogicalShiftedRegister = packed union {
+ group: @This().Group,
+ @"and": And,
+ bic: Bic,
+ orr: Orr,
+ orn: Orn,
+ eor: Eor,
+ eon: Eon,
+ ands: Ands,
+ bics: Bics,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.13 AND (shifted register)
+ pub const And = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .@"and",
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.32 BIC (shifted register)
+ pub const Bic = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .@"and",
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.241 ORR (shifted register)
+ pub const Orr = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .orr,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.239 ORN (shifted register)
+ pub const Orn = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .orr,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.120 EOR (shifted register)
+ pub const Eor = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .eor,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.118 EON (shifted register)
+ pub const Eon = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .eor,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.15 ANDS (shifted register)
+ pub const Ands = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = false,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .ands,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.33 BICS (shifted register)
+ pub const Bics = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ N: bool = true,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01010,
+ opc: LogicalOpc = .ands,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ @"and": And,
+ bic: Bic,
+ orr: Orr,
+ orn: Orn,
+ eor: Eor,
+ eon: Eon,
+ ands: Ands,
+ bics: Bics,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return if (inst.group.sf == .word and @as(u1, @truncate(inst.group.imm6 >> 5)) == 0b1)
+ .unallocated
+ else switch (inst.group.opc) {
+ .@"and" => switch (inst.group.N) {
+ false => .{ .@"and" = inst.@"and" },
+ true => .{ .bic = inst.bic },
+ },
+ .orr => switch (inst.group.N) {
+ false => .{ .orr = inst.orr },
+ true => .{ .orn = inst.orn },
+ },
+ .eor => switch (inst.group.N) {
+ false => .{ .eor = inst.eor },
+ true => .{ .eon = inst.eon },
+ },
+ .ands => switch (inst.group.N) {
+ false => .{ .ands = inst.ands },
+ true => .{ .bics = inst.bics },
+ },
+ };
+ }
+ };
+
+ /// Add/subtract (shifted register)
+ pub const AddSubtractShiftedRegister = packed union {
+ group: @This().Group,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool,
+ op: AddSubtractOp,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.5 ADD (shifted register)
+ pub const Add = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = false,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.9 ADDS (shifted register)
+ pub const Adds = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = true,
+ op: AddSubtractOp = .add,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.358 SUB (shifted register)
+ pub const Sub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = false,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ /// C6.2.364 SUBS (shifted register)
+ pub const Subs = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ imm6: Shift.Amount,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b0,
+ shift: Shift.Op,
+ decoded24: u5 = 0b01011,
+ S: bool = true,
+ op: AddSubtractOp = .sub,
+ sf: Register.IntegerSize,
+ };
+
+ pub const Decoded = union(enum) {
+ unallocated,
+ add: Add,
+ adds: Adds,
+ sub: Sub,
+ subs: Subs,
+ };
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.shift) {
+ .ror => .unallocated,
+ .lsl, .lsr, .asr => if (inst.group.sf == .word and @as(u1, @truncate(inst.group.imm6 >> 5)) == 0b1)
+ .unallocated
+ else switch (inst.group.op) {
+ .add => switch (inst.group.S) {
+ false => .{ .add = inst.add },
+ true => .{ .adds = inst.adds },
+ },
+ .sub => switch (inst.group.S) {
+ false => .{ .sub = inst.sub },
+ true => .{ .subs = inst.subs },
+ },
+ },
+ };
+ }
+ };
+
/// Add/subtract (extended register)
///
/// `ADD/ADDS/SUB/SUBS <Rd>, <Rn>, <Rm>, <extend> {#<amount>}`: Rm is
/// sign- or zero-extended, optionally left-shifted by 0-4, then added to
/// or subtracted from Rn. One packed struct per instruction is overlaid
/// on the shared `Group` layout; fields are least-significant-bit first
/// and the `decodedN` fields pin the constant opcode bits beginning at
/// bit N.
pub const AddSubtractExtendedRegister = packed union {
    group: @This().Group,
    add: Add,
    adds: Adds,
    sub: Sub,
    subs: Subs,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        /// Left-shift amount applied after extension; only 0-4 are allocated (see `decode`).
        imm3: Extend.Amount,
        /// Extension applied to Rm.
        option: Option,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        /// Must be 0b00; all other values are unallocated (see `decode`).
        opt: u2,
        decoded24: u5 = 0b01011,
        /// Whether the instruction sets the condition flags.
        S: bool,
        op: AddSubtractOp,
        sf: Register.IntegerSize,
    };

    /// C6.2.3 ADD (extended register)
    pub const Add = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        imm3: Extend.Amount,
        option: Option,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        opt: u2 = 0b00,
        decoded24: u5 = 0b01011,
        S: bool = false,
        op: AddSubtractOp = .add,
        sf: Register.IntegerSize,
    };

    /// C6.2.7 ADDS (extended register)
    pub const Adds = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        imm3: Extend.Amount,
        option: Option,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        opt: u2 = 0b00,
        decoded24: u5 = 0b01011,
        S: bool = true,
        op: AddSubtractOp = .add,
        sf: Register.IntegerSize,
    };

    /// C6.2.356 SUB (extended register)
    pub const Sub = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        imm3: Extend.Amount,
        option: Option,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        opt: u2 = 0b00,
        decoded24: u5 = 0b01011,
        S: bool = false,
        op: AddSubtractOp = .sub,
        sf: Register.IntegerSize,
    };

    /// C6.2.362 SUBS (extended register)
    pub const Subs = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        imm3: Extend.Amount,
        option: Option,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        opt: u2 = 0b00,
        decoded24: u5 = 0b01011,
        S: bool = true,
        op: AddSubtractOp = .sub,
        sf: Register.IntegerSize,
    };

    /// Register extension kinds, as encoded in the 3-bit `option` field.
    /// `u`/`s` prefixes select zero/sign extension; the suffix gives the
    /// source sub-register width.
    pub const Option = enum(u3) {
        uxtb = 0b000,
        uxth = 0b001,
        uxtw = 0b010,
        uxtx = 0b011,
        sxtb = 0b100,
        sxth = 0b101,
        sxtw = 0b110,
        sxtx = 0b111,

        /// Width of the source register this extension reads from:
        /// doubleword only for the full 64-bit (`*xtx`) extensions.
        pub fn sf(option: Option) Register.IntegerSize {
            return switch (option) {
                .uxtb, .uxth, .uxtw, .sxtb, .sxth, .sxtw => .word,
                .uxtx, .sxtx => .doubleword,
            };
        }
    };

    /// Extension kind paired with its left-shift amount.
    pub const Extend = union(Option) {
        uxtb: Amount,
        uxth: Amount,
        uxtw: Amount,
        uxtx: Amount,
        sxtb: Amount,
        sxth: Amount,
        sxtw: Amount,
        sxtx: Amount,

        /// Post-extension left-shift amount; only 0-4 are allocated.
        pub const Amount = u3;
    };

    /// Result of `decode`; `unallocated` covers reserved bit patterns.
    pub const Decoded = union(enum) {
        unallocated,
        add: Add,
        adds: Adds,
        sub: Sub,
        subs: Subs,
    };
    /// Classify the encoding: shift amounts above 4 and non-zero `opt`
    /// are reserved; otherwise `op` and `S` select the instruction.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.imm3) {
            0b101 => .unallocated,
            0b110...0b111 => .unallocated,
            0b000...0b100 => switch (inst.group.opt) {
                0b01 => .unallocated,
                0b10...0b11 => .unallocated,
                0b00 => switch (inst.group.op) {
                    .add => switch (inst.group.S) {
                        false => .{ .add = inst.add },
                        true => .{ .adds = inst.adds },
                    },
                    .sub => switch (inst.group.S) {
                        false => .{ .sub = inst.sub },
                        true => .{ .subs = inst.subs },
                    },
                },
            },
        };
    }
};
+
/// Add/subtract (with carry)
///
/// ADC/ADCS/SBC/SBCS: add or subtract a register incorporating the carry
/// flag. One packed struct per instruction is overlaid on the shared
/// `Group` layout; fields are least-significant-bit first and the
/// `decodedN` fields pin the constant opcode bits beginning at bit N.
pub const AddSubtractWithCarry = packed union {
    group: @This().Group,
    adc: Adc,
    adcs: Adcs,
    sbc: Sbc,
    sbcs: Sbcs,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010000,
        /// Whether the instruction sets the condition flags.
        S: bool,
        op: Op,
        sf: Register.IntegerSize,
    };

    /// C6.2.1 ADC
    pub const Adc = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010000,
        S: bool = false,
        op: Op = .adc,
        sf: Register.IntegerSize,
    };

    /// C6.2.2 ADCS
    pub const Adcs = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010000,
        S: bool = true,
        op: Op = .adc,
        sf: Register.IntegerSize,
    };

    /// C6.2.265 SBC
    pub const Sbc = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010000,
        S: bool = false,
        op: Op = .sbc,
        sf: Register.IntegerSize,
    };

    /// C6.2.266 SBCS
    pub const Sbcs = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010000,
        S: bool = true,
        op: Op = .sbc,
        sf: Register.IntegerSize,
    };

    /// Add-with-carry vs subtract-with-carry selector.
    pub const Op = enum(u1) {
        adc = 0b0,
        sbc = 0b1,
    };

    /// Every op/S combination is allocated, so there is no
    /// `unallocated` case here.
    pub const Decoded = union(enum) {
        adc: Adc,
        adcs: Adcs,
        sbc: Sbc,
        sbcs: Sbcs,
    };
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.op) {
            .adc => switch (inst.group.S) {
                false => .{ .adc = inst.adc },
                true => .{ .adcs = inst.adcs },
            },
            .sbc => switch (inst.group.S) {
                false => .{ .sbc = inst.sbc },
                true => .{ .sbcs = inst.sbcs },
            },
        };
    }
};
+
/// Rotate right into flags
///
/// Per the Arm ARM this encoding class contains only RMIF (FEAT_FlagM);
/// just the group-level layout is modeled here. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const RotateRightIntoFlags = packed union {
    group: @This().Group,

    pub const Group = packed struct {
        /// Which NZCV flags to update.
        mask: Nzcv,
        o2: u1,
        Rn: Register.Encoded,
        // Written with all five digits (previously 0b0001 — same value)
        // to match the u5 field width, consistent with the other
        // decodedN defaults in this file.
        decoded10: u5 = 0b00001,
        /// Rotate amount.
        imm6: u6,
        decoded21: u8 = 0b11010000,
        S: bool,
        op: u1,
        sf: Register.IntegerSize,
    };
};
+
/// Evaluate into flags
///
/// Per the Arm ARM this encoding class holds SETF8/SETF16 (FEAT_FlagM);
/// only the group-level layout is modeled here. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const EvaluateIntoFlags = packed union {
    group: @This().Group,

    pub const Group = packed struct {
        mask: Nzcv,
        o3: u1,
        Rn: Register.Encoded,
        decoded10: u4 = 0b0010,
        /// Width of the operand evaluated into the flags.
        sz: enum(u1) {
            byte = 0b0,
            word = 0b1,
        },
        opcode2: u6,
        decoded21: u8 = 0b11010000,
        S: bool,
        op: u1,
        sf: Register.IntegerSize,
    };
};
+
/// Conditional compare (register)
///
/// CCMN/CCMP with a register second operand: if `cond` holds, compare
/// Rn with Rm; otherwise set NZCV to the immediate `nzcv`. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const ConditionalCompareRegister = packed union {
    group: @This().Group,
    ccmn: Ccmn,
    ccmp: Ccmp,

    pub const Group = packed struct {
        /// Flag values used when the condition fails.
        nzcv: Nzcv,
        o3: u1,
        Rn: Register.Encoded,
        o2: u1,
        decoded11: u1 = 0b0,
        /// Condition under which the compare is performed.
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010010,
        S: bool,
        op: Op,
        sf: Register.IntegerSize,
    };

    /// C6.2.49 CCMN (register)
    pub const Ccmn = packed struct {
        nzcv: Nzcv,
        o3: u1 = 0b0,
        Rn: Register.Encoded,
        o2: u1 = 0b0,
        decoded11: u1 = 0b0,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010010,
        S: bool = true,
        op: Op = .ccmn,
        sf: Register.IntegerSize,
    };

    /// C6.2.51 CCMP (register)
    pub const Ccmp = packed struct {
        nzcv: Nzcv,
        o3: u1 = 0b0,
        Rn: Register.Encoded,
        o2: u1 = 0b0,
        decoded11: u1 = 0b0,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010010,
        S: bool = true,
        op: Op = .ccmp,
        sf: Register.IntegerSize,
    };

    /// Conditional-compare-negative vs conditional-compare selector.
    pub const Op = enum(u1) {
        ccmn = 0b0,
        ccmp = 0b1,
    };
};
+
/// Conditional compare (immediate)
///
/// CCMN/CCMP with a 5-bit immediate second operand; same shape as the
/// register form except `imm5` replaces Rm and `decoded11` is 1.
/// Fields are least-significant-bit first; the `decodedN` fields pin
/// the constant opcode bits beginning at bit N.
pub const ConditionalCompareImmediate = packed union {
    group: @This().Group,
    ccmn: Ccmn,
    ccmp: Ccmp,

    pub const Group = packed struct {
        /// Flag values used when the condition fails.
        nzcv: Nzcv,
        o3: u1,
        Rn: Register.Encoded,
        o2: u1,
        decoded11: u1 = 0b1,
        /// Condition under which the compare is performed.
        cond: ConditionCode,
        /// Unsigned immediate second operand.
        imm5: u5,
        decoded21: u8 = 0b11010010,
        S: bool,
        op: Op,
        sf: Register.IntegerSize,
    };

    /// C6.2.48 CCMN (immediate)
    pub const Ccmn = packed struct {
        nzcv: Nzcv,
        o3: u1 = 0b0,
        Rn: Register.Encoded,
        o2: u1 = 0b0,
        decoded11: u1 = 0b1,
        cond: ConditionCode,
        imm5: u5,
        decoded21: u8 = 0b11010010,
        S: bool = true,
        op: Op = .ccmn,
        sf: Register.IntegerSize,
    };

    /// C6.2.50 CCMP (immediate)
    pub const Ccmp = packed struct {
        nzcv: Nzcv,
        o3: u1 = 0b0,
        Rn: Register.Encoded,
        o2: u1 = 0b0,
        decoded11: u1 = 0b1,
        cond: ConditionCode,
        imm5: u5,
        decoded21: u8 = 0b11010010,
        S: bool = true,
        op: Op = .ccmp,
        sf: Register.IntegerSize,
    };

    /// Conditional-compare-negative vs conditional-compare selector.
    pub const Op = enum(u1) {
        ccmn = 0b0,
        ccmp = 0b1,
    };
};
+
/// Conditional select
///
/// CSEL/CSINC/CSINV/CSNEG: Rd gets Rn when `cond` holds, otherwise a
/// (possibly incremented/inverted/negated) Rm. The variant is selected
/// by `op` and `op2` (see `decode`). Fields are least-significant-bit
/// first; the `decodedN` fields pin the constant opcode bits beginning
/// at bit N.
pub const ConditionalSelect = packed union {
    group: @This().Group,
    csel: Csel,
    csinc: Csinc,
    csinv: Csinv,
    csneg: Csneg,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        op2: u2,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010100,
        S: bool,
        op: u1,
        sf: Register.IntegerSize,
    };

    /// C6.2.103 CSEL
    pub const Csel = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        op2: u2 = 0b00,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010100,
        S: bool = false,
        op: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C6.2.106 CSINC
    pub const Csinc = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        op2: u2 = 0b01,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010100,
        S: bool = false,
        op: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C6.2.107 CSINV
    pub const Csinv = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        op2: u2 = 0b00,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010100,
        S: bool = false,
        op: u1 = 0b1,
        sf: Register.IntegerSize,
    };

    /// C6.2.108 CSNEG
    pub const Csneg = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        op2: u2 = 0b01,
        cond: ConditionCode,
        Rm: Register.Encoded,
        decoded21: u8 = 0b11010100,
        S: bool = false,
        op: u1 = 0b1,
        sf: Register.IntegerSize,
    };

    /// Result of `decode`; `unallocated` covers reserved bit patterns.
    pub const Decoded = union(enum) {
        unallocated,
        csel: Csel,
        csinc: Csinc,
        csinv: Csinv,
        csneg: Csneg,
    };
    /// Classify the encoding: S must be clear and op2 must be 0b00 or
    /// 0b01; then (op, op2) selects among the four variants.
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.S) {
            true => .unallocated,
            false => switch (inst.group.op) {
                0b0 => switch (inst.group.op2) {
                    0b10...0b11 => .unallocated,
                    0b00 => .{ .csel = inst.csel },
                    0b01 => .{ .csinc = inst.csinc },
                },
                0b1 => switch (inst.group.op2) {
                    0b10...0b11 => .unallocated,
                    0b00 => .{ .csinv = inst.csinv },
                    0b01 => .{ .csneg = inst.csneg },
                },
            },
        };
    }
};
+
/// Data-processing (3 source)
///
/// Multiply-add family: MADD/MSUB plus the widening and high-half
/// multiplies. The `Group` view exposes the raw 3-bit `op31` selector;
/// the long-multiply variants instead split those bits into `op21` + a
/// `U` (unsigned) flag. Fields are least-significant-bit first; the
/// `decodedN` fields pin the constant opcode bits beginning at bit N.
pub const DataProcessingThreeSource = packed union {
    group: @This().Group,
    madd: Madd,
    msub: Msub,
    smaddl: Smaddl,
    smsubl: Smsubl,
    smulh: Smulh,
    umaddl: Umaddl,
    umsubl: Umsubl,
    umulh: Umulh,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        /// Addend/subtrahend accumulator register.
        Ra: Register.Encoded,
        /// Accumulate (add) vs decumulate (sub).
        o0: AddSubtractOp,
        Rm: Register.Encoded,
        /// Variant selector; see `decode` for the allocated values.
        op31: u3,
        decoded24: u5 = 0b11011,
        /// Must be 0b00; other values are unallocated (see `decode`).
        op54: u2,
        sf: Register.IntegerSize,
    };

    /// C6.2.218 MADD
    pub const Madd = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        Ra: Register.Encoded,
        o0: AddSubtractOp = .add,
        Rm: Register.Encoded,
        op31: u3 = 0b000,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize,
    };

    /// C6.2.231 MSUB
    pub const Msub = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        Ra: Register.Encoded,
        o0: AddSubtractOp = .sub,
        Rm: Register.Encoded,
        op31: u3 = 0b000,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize,
    };

    /// C6.2.282 SMADDL
    pub const Smaddl = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        Ra: Register.Encoded,
        o0: AddSubtractOp = .add,
        Rm: Register.Encoded,
        op21: u2 = 0b01,
        /// Unsigned multiply when set; false selects the signed form.
        U: bool = false,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        /// Long multiplies always have a 64-bit destination.
        sf: Register.IntegerSize = .doubleword,
    };

    /// C6.2.287 SMSUBL
    pub const Smsubl = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        Ra: Register.Encoded,
        o0: AddSubtractOp = .sub,
        Rm: Register.Encoded,
        op21: u2 = 0b01,
        U: bool = false,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize = .doubleword,
    };

    /// C6.2.288 SMULH
    pub const Smulh = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        /// High-half multiplies have no accumulator; Ra is fixed to 0b11111.
        Ra: Register.Encoded = @enumFromInt(0b11111),
        o0: AddSubtractOp = .add,
        Rm: Register.Encoded,
        op21: u2 = 0b10,
        U: bool = false,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize = .doubleword,
    };

    /// C6.2.389 UMADDL
    pub const Umaddl = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        Ra: Register.Encoded,
        o0: AddSubtractOp = .add,
        Rm: Register.Encoded,
        op21: u2 = 0b01,
        U: bool = true,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize = .doubleword,
    };

    /// C6.2.391 UMSUBL
    pub const Umsubl = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        Ra: Register.Encoded,
        o0: AddSubtractOp = .sub,
        Rm: Register.Encoded,
        op21: u2 = 0b01,
        U: bool = true,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize = .doubleword,
    };

    /// C6.2.392 UMULH
    pub const Umulh = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        /// High-half multiplies have no accumulator; Ra is fixed to 0b11111.
        Ra: Register.Encoded = @enumFromInt(0b11111),
        o0: AddSubtractOp = .add,
        Rm: Register.Encoded,
        op21: u2 = 0b10,
        U: bool = true,
        decoded24: u5 = 0b11011,
        op54: u2 = 0b00,
        sf: Register.IntegerSize = .doubleword,
    };

    /// Result of `decode`; `unallocated` covers reserved bit patterns.
    pub const Decoded = union(enum) {
        unallocated,
        madd: Madd,
        msub: Msub,
        smaddl: Smaddl,
        smsubl: Smsubl,
        smulh: Smulh,
        umaddl: Umaddl,
        umsubl: Umsubl,
        umulh: Umulh,
    };
    /// Classify the encoding. op54 must be 0b00; op31 selects the
    /// variant (widening/high-half forms require sf == doubleword and,
    /// for the high-half forms, o0 == add).
    pub fn decode(inst: @This()) @This().Decoded {
        return switch (inst.group.op54) {
            0b01, 0b10...0b11 => .unallocated,
            0b00 => switch (inst.group.op31) {
                0b011, 0b100, 0b111 => .unallocated,
                0b000 => switch (inst.group.o0) {
                    .add => .{ .madd = inst.madd },
                    .sub => .{ .msub = inst.msub },
                },
                0b001 => switch (inst.group.sf) {
                    .word => .unallocated,
                    .doubleword => switch (inst.group.o0) {
                        .add => .{ .smaddl = inst.smaddl },
                        .sub => .{ .smsubl = inst.smsubl },
                    },
                },
                0b010 => switch (inst.group.sf) {
                    .word => .unallocated,
                    .doubleword => switch (inst.group.o0) {
                        .add => .{ .smulh = inst.smulh },
                        .sub => .unallocated,
                    },
                },
                0b101 => switch (inst.group.sf) {
                    .word => .unallocated,
                    .doubleword => switch (inst.group.o0) {
                        .add => .{ .umaddl = inst.umaddl },
                        .sub => .{ .umsubl = inst.umsubl },
                    },
                },
                0b110 => switch (inst.group.sf) {
                    .word => .unallocated,
                    .doubleword => switch (inst.group.o0) {
                        .add => .{ .umulh = inst.umulh },
                        .sub => .unallocated,
                    },
                },
            },
        };
    }
};
+
/// A shift operand: the tag selects the shift kind (tag values are the
/// 2-bit hardware encoding) and the payload is the shift amount.
pub const Shift = union(enum(u2)) {
    lsl: Amount = 0b00,
    lsr: Amount = 0b01,
    asr: Amount = 0b10,
    ror: Amount = 0b11,

    /// The bare 2-bit shift-kind enum, derived from this union's tag type.
    pub const Op = @typeInfo(Shift).@"union".tag_type.?;
    /// Shift amount (0-63).
    pub const Amount = u6;
    /// The identity shift: LSL #0.
    pub const none: Shift = .{ .lsl = 0 };
};
+
+ pub const Nzcv = packed struct { v: bool, c: bool, z: bool, n: bool };
+
/// Result of the group-level `decode`: which register data-processing
/// class the encoding belongs to, or `unallocated` for reserved
/// patterns.
pub const Decoded = union(enum) {
    unallocated,
    data_processing_two_source: DataProcessingTwoSource,
    data_processing_one_source: DataProcessingOneSource,
    logical_shifted_register: LogicalShiftedRegister,
    add_subtract_shifted_register: AddSubtractShiftedRegister,
    add_subtract_extended_register: AddSubtractExtendedRegister,
    add_subtract_with_carry: AddSubtractWithCarry,
    rotate_right_into_flags: RotateRightIntoFlags,
    evaluate_into_flags: EvaluateIntoFlags,
    conditional_compare_register: ConditionalCompareRegister,
    conditional_compare_immediate: ConditionalCompareImmediate,
    conditional_select: ConditionalSelect,
    data_processing_three_source: DataProcessingThreeSource,
};
/// Dispatch a register data-processing encoding to its instruction
/// class using the op0/op1/op2/op3 selector fields (Arm ARM C4-style
/// encoding table).
pub fn decode(inst: @This()) @This().Decoded {
    const group = inst.group;
    if (group.op1 == 0b0) {
        // op2<3> separates logical operations from add/subtract.
        if (group.op2 < 0b1000) return .{ .logical_shifted_register = inst.logical_shifted_register };
        // op2<0> separates the shifted-register and extended-register forms.
        return if (group.op2 & 0b0001 == 0b0000)
            .{ .add_subtract_shifted_register = inst.add_subtract_shifted_register }
        else
            .{ .add_subtract_extended_register = inst.add_subtract_extended_register };
    }
    return switch (group.op2) {
        0b0000 => switch (group.op3) {
            0b000000 => .{ .add_subtract_with_carry = inst.add_subtract_with_carry },
            0b000001, 0b100001 => .{ .rotate_right_into_flags = inst.rotate_right_into_flags },
            0b000010, 0b010010, 0b100010, 0b110010 => .{ .evaluate_into_flags = inst.evaluate_into_flags },
            else => .unallocated,
        },
        // op3<1> separates the register and immediate compare forms.
        0b0010 => if (group.op3 & 0b000010 == 0b000000)
            .{ .conditional_compare_register = inst.conditional_compare_register }
        else
            .{ .conditional_compare_immediate = inst.conditional_compare_immediate },
        0b0100 => .{ .conditional_select = inst.conditional_select },
        0b0110 => switch (group.op0) {
            0b0 => .{ .data_processing_two_source = inst.data_processing_two_source },
            0b1 => .{ .data_processing_one_source = inst.data_processing_one_source },
        },
        0b1000...0b1111 => .{ .data_processing_three_source = inst.data_processing_three_source },
        else => .unallocated,
    };
}
+ };
+
+ /// C4.1.90 Data Processing -- Scalar Floating-Point and Advanced SIMD
+ pub const DataProcessingVector = packed union {
+ group: @This().Group,
+ simd_scalar_pairwise: SimdScalarPairwise,
+ simd_copy: SimdCopy,
+ simd_two_register_miscellaneous: SimdTwoRegisterMiscellaneous,
+ simd_across_lanes: SimdAcrossLanes,
+ simd_three_same: SimdThreeSame,
+ simd_modified_immediate: SimdModifiedImmediate,
+ convert_float_integer: ConvertFloatInteger,
+ float_data_processing_one_source: FloatDataProcessingOneSource,
+ float_compare: FloatCompare,
+ float_immediate: FloatImmediate,
+ float_data_processing_two_source: FloatDataProcessingTwoSource,
+ float_data_processing_three_source: FloatDataProcessingThreeSource,
+
/// Table C4-91 Encoding table for the Data Processing -- Scalar Floating-Point and Advanced SIMD group
pub const Group = packed struct {
    /// Low bits not consulted by the group-level selector.
    encoded0: u10,
    op3: u9,
    op2: u4,
    op1: u2,
    decoded25: u3 = 0b111,
    op0: u4,
};
+
/// Advanced SIMD scalar pairwise
///
/// Scalar reductions of a register pair; only ADDP is modeled so far.
/// Fields are least-significant-bit first; the `decodedN` fields pin
/// the constant opcode bits beginning at bit N.
pub const SimdScalarPairwise = packed union {
    group: @This().Group,
    addp: Addp,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b10,
        opcode: u5,
        decoded17: u5 = 0b11000,
        size: Size,
        decoded24: u5 = 0b11110,
        U: u1,
        decoded30: u2 = 0b01,
    };

    /// C7.2.4 ADDP (scalar)
    pub const Addp = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b10,
        opcode: u5 = 0b11011,
        decoded17: u5 = 0b11000,
        size: Size,
        decoded24: u5 = 0b11110,
        U: u1 = 0b0,
        decoded30: u2 = 0b01,
    };
};
+
/// Advanced SIMD copy
///
/// Element moves between SIMD and general registers; SMOV and UMOV are
/// modeled. The variants fix individual bits of the group's 4-bit
/// `imm4` field (SMOV: 0b0101, UMOV: 0b0111) via the split
/// decoded11/decoded12/decoded13 fields. Fields are
/// least-significant-bit first; `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const SimdCopy = packed union {
    group: @This().Group,
    smov: Smov,
    umov: Umov,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        imm4: u4,
        decoded15: u1 = 0b0,
        /// Element size and index selector.
        imm5: u5,
        decoded21: u8 = 0b01110000,
        op: u1,
        Q: Register.IntegerSize,
        decoded31: u1 = 0b0,
    };

    /// C7.2.279 SMOV
    pub const Smov = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        decoded11: u1 = 0b1,
        decoded12: u1 = 0b0,
        decoded13: u2 = 0b01,
        decoded15: u1 = 0b0,
        imm5: u5,
        decoded21: u8 = 0b01110000,
        decoded29: u1 = 0b0,
        Q: Register.IntegerSize,
        decoded31: u1 = 0b0,
    };

    /// C7.2.371 UMOV
    pub const Umov = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        decoded11: u1 = 0b1,
        decoded12: u1 = 0b1,
        decoded13: u2 = 0b01,
        decoded15: u1 = 0b0,
        imm5: u5,
        decoded21: u8 = 0b01110000,
        decoded29: u1 = 0b0,
        Q: Register.IntegerSize,
        decoded31: u1 = 0b0,
    };
};
+
/// Advanced SIMD two-register miscellaneous
///
/// One-input vector operations; only CNT is modeled so far. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const SimdTwoRegisterMiscellaneous = packed union {
    group: @This().Group,
    cnt: Cnt,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b10,
        opcode: u5,
        decoded17: u5 = 0b10000,
        size: Size,
        decoded24: u5 = 0b01110,
        U: u1,
        /// Vector width: 64-bit or full 128-bit.
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.38 CNT
    pub const Cnt = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b10,
        opcode: u5 = 0b00101,
        decoded17: u5 = 0b10000,
        size: Size,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };
};
+
/// Advanced SIMD across lanes
///
/// Whole-vector reductions; only ADDV is modeled so far. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const SimdAcrossLanes = packed union {
    group: @This().Group,
    addv: Addv,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b10,
        opcode: u5,
        decoded17: u5 = 0b11000,
        size: Size,
        decoded24: u5 = 0b01110,
        U: u1,
        /// Vector width: 64-bit or full 128-bit.
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.6 ADDV
    pub const Addv = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u2 = 0b10,
        opcode: u5 = 0b11011,
        decoded17: u5 = 0b11000,
        size: Size,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };
};
+
/// Advanced SIMD three same
///
/// Element-wise two-input vector operations. Note the bitwise-logical
/// forms (AND/BIC/ORR/ORN/EOR) all share opcode 0b00011 and are
/// distinguished by the `size` and `U` bits instead of operating on a
/// real element size. Fields are least-significant-bit first; the
/// `decodedN` fields pin the constant opcode bits beginning at bit N.
pub const SimdThreeSame = packed union {
    group: @This().Group,
    addp: Addp,
    @"and": And,
    bic: Bic,
    orr: Orr,
    orn: Orn,
    eor: Eor,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        size: Size,
        decoded24: u5 = 0b01110,
        U: u1,
        /// Vector width: 64-bit or full 128-bit.
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.5 ADDP (vector)
    pub const Addp = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5 = 0b10111,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        size: Size,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.11 AND (vector)
    pub const And = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5 = 0b00011,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        // `size` acts as a sub-opcode for the logical forms.
        size: Size = .byte,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.21 BIC (vector, register)
    pub const Bic = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5 = 0b00011,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        size: Size = .half,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.213 ORR (vector, register)
    pub const Orr = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5 = 0b00011,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        size: Size = .single,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.211 ORN (vector)
    pub const Orn = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5 = 0b00011,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        size: Size = .double,
        decoded24: u5 = 0b01110,
        U: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.41 EOR (vector)
    pub const Eor = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u1 = 0b1,
        opcode: u5 = 0b00011,
        Rm: Register.Encoded,
        decoded21: u1 = 0b1,
        size: Size = .byte,
        decoded24: u5 = 0b01110,
        U: u1 = 0b1,
        Q: Q,
        decoded31: u1 = 0b0,
    };
};
+
/// Advanced SIMD modified immediate
///
/// Vector immediates built from imm3:imm5 expanded according to
/// `cmode`/`op`. ORR and BIC pin the low cmode bit via a split
/// `cmode0` field; FMOV fixes cmode to 0b1111 and o2 to 1. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const SimdModifiedImmediate = packed union {
    group: @This().Group,
    movi: Movi,
    orr: Orr,
    fmov: Fmov,
    mvni: Mvni,
    bic: Bic,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        /// Low five bits of the encoded immediate.
        imm5: u5,
        decoded10: u1 = 0b1,
        o2: u1,
        /// Immediate-expansion mode.
        cmode: u4,
        /// High three bits of the encoded immediate.
        imm3: u3,
        decoded19: u10 = 0b0111100000,
        op: u1,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.204 MOVI
    pub const Movi = packed struct {
        Rd: Register.Encoded,
        imm5: u5,
        decoded10: u1 = 0b1,
        o2: u1 = 0b0,
        cmode: u4,
        imm3: u3,
        decoded19: u10 = 0b0111100000,
        op: u1,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.212 ORR (vector, immediate)
    pub const Orr = packed struct {
        Rd: Register.Encoded,
        imm5: u5,
        decoded10: u1 = 0b1,
        o2: u1 = 0b0,
        /// Fixed low bit of cmode; the remaining bits follow in `cmode`.
        cmode0: u1 = 0b1,
        cmode: u3,
        imm3: u3,
        decoded19: u10 = 0b0111100000,
        op: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.129 FMOV (vector, immediate)
    pub const Fmov = packed struct {
        Rd: Register.Encoded,
        imm5: u5,
        decoded10: u1 = 0b1,
        o2: u1 = 0b1,
        cmode: u4 = 0b1111,
        imm3: u3,
        decoded19: u10 = 0b0111100000,
        op: u1 = 0b0,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.208 MVNI
    pub const Mvni = packed struct {
        Rd: Register.Encoded,
        imm5: u5,
        decoded10: u1 = 0b1,
        o2: u1 = 0b0,
        cmode: u4,
        imm3: u3,
        decoded19: u10 = 0b0111100000,
        op: u1 = 0b1,
        Q: Q,
        decoded31: u1 = 0b0,
    };

    /// C7.2.20 BIC (vector, immediate)
    pub const Bic = packed struct {
        Rd: Register.Encoded,
        imm5: u5,
        decoded10: u1 = 0b1,
        o2: u1 = 0b0,
        /// Fixed low bit of cmode; the remaining bits follow in `cmode`.
        cmode0: u1 = 0b1,
        cmode: u3,
        imm3: u3,
        decoded19: u10 = 0b0111100000,
        op: u1 = 0b1,
        Q: Q,
        decoded31: u1 = 0b0,
    };
};
+
/// Conversion between floating-point and integer
///
/// Scalar float<->integer conversions and FMOV (general). Variants are
/// selected by `rmode` (rounding direction) and `opcode` (signed vs
/// unsigned, direction). The group spells the float-type field `ptype`;
/// the per-instruction structs call the same bits `ftype`. Fields are
/// least-significant-bit first; the `decodedN` fields pin the constant
/// opcode bits beginning at bit N.
pub const ConvertFloatInteger = packed union {
    group: @This().Group,
    fcvtns: Fcvtns,
    fcvtnu: Fcvtnu,
    scvtf: Scvtf,
    ucvtf: Ucvtf,
    fcvtas: Fcvtas,
    fcvtau: Fcvtau,
    fmov: Fmov,
    fcvtps: Fcvtps,
    fcvtpu: Fcvtpu,
    fcvtms: Fcvtms,
    fcvtmu: Fcvtmu,
    fcvtzs: Fcvtzs,
    fcvtzu: Fcvtzu,
    fjcvtzs: Fjcvtzs,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3,
        rmode: u2,
        decoded21: u1 = 0b1,
        ptype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.81 FCVTNS (scalar)
    pub const Fcvtns = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b000,
        rmode: Rmode = .n,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.83 FCVTNU (scalar)
    pub const Fcvtnu = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b001,
        rmode: Rmode = .n,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.236 SCVTF (scalar, integer)
    pub const Scvtf = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b010,
        rmode: Rmode = .n,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.355 UCVTF (scalar, integer)
    pub const Ucvtf = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b011,
        rmode: Rmode = .n,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.71 FCVTAS (scalar)
    pub const Fcvtas = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b100,
        rmode: Rmode = .n,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.73 FCVTAU (scalar)
    pub const Fcvtau = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b101,
        rmode: Rmode = .n,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.131 FMOV (general)
    ///
    /// Bit-exact moves between general and SIMD&FP registers; uses its
    /// own `Opcode`/`Rmode` interpretations of the selector bits.
    pub const Fmov = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: Opcode,
        rmode: Fmov.Rmode,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,

        /// Direction of the move.
        pub const Opcode = enum(u3) {
            float_to_integer = 0b110,
            integer_to_float = 0b111,
            _,
        };

        /// For FMOV, rmode selects the vector element half rather than
        /// a rounding mode.
        pub const Rmode = enum(u2) {
            @"0" = 0b00,
            @"1" = 0b01,
            _,
        };
    };

    /// C7.2.85 FCVTPS (scalar)
    pub const Fcvtps = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b000,
        rmode: Rmode = .p,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.87 FCVTPU (scalar)
    pub const Fcvtpu = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b001,
        rmode: Rmode = .p,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.76 FCVTMS (scalar)
    pub const Fcvtms = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b000,
        rmode: Rmode = .m,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.78 FCVTMU (scalar)
    pub const Fcvtmu = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b001,
        rmode: Rmode = .m,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.92 FCVTZS (scalar, integer)
    pub const Fcvtzs = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b000,
        rmode: Rmode = .z,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.96 FCVTZU (scalar, integer)
    pub const Fcvtzu = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b001,
        rmode: Rmode = .z,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize,
    };

    /// C7.2.99 FJCVTZS
    ///
    /// JavaScript convert: always double source, 32-bit result.
    pub const Fjcvtzs = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u6 = 0b000000,
        opcode: u3 = 0b110,
        rmode: Rmode = .z,
        decoded21: u1 = 0b1,
        ftype: Ftype = .double,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        sf: Register.IntegerSize = .word,
    };

    /// Rounding direction for the conversion instructions.
    pub const Rmode = enum(u2) {
        /// to nearest
        n = 0b00,
        /// toward plus infinity
        p = 0b01,
        /// toward minus infinity
        m = 0b10,
        /// toward zero
        z = 0b11,
    };
};
+
/// Floating-point data-processing (1 source)
///
/// Unary scalar float operations. The group's 6-bit `opcode` is split
/// differently per variant: FMOV/FABS/FNEG/FSQRT use `opc` (low 2 bits)
/// with the top 4 bits zero, FCVT encodes the destination `Ftype` in
/// `opc`, and the FRINT* family uses a 3-bit `rmode` with the top bits
/// 0b001. Fields are least-significant-bit first; the `decodedN` fields
/// pin the constant opcode bits beginning at bit N.
pub const FloatDataProcessingOneSource = packed union {
    group: @This().Group,
    fmov: Fmov,
    fabs: Fabs,
    fneg: Fneg,
    fsqrt: Fsqrt,
    fcvt: Fcvt,
    frintn: Frintn,
    frintp: Frintp,
    frintm: Frintm,
    frintz: Frintz,
    frinta: Frinta,
    frintx: Frintx,
    frinti: Frinti,

    pub const Group = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        opcode: u6,
        decoded21: u1 = 0b1,
        ptype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool,
        decoded30: u1 = 0b0,
        M: u1,
    };

    /// C7.2.130 FMOV (register)
    pub const Fmov = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        opc: u2 = 0b00,
        decoded17: u4 = 0b0000,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.46 FABS (scalar)
    pub const Fabs = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        opc: u2 = 0b01,
        decoded17: u4 = 0b0000,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.140 FNEG (scalar)
    pub const Fneg = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        opc: u2 = 0b10,
        decoded17: u4 = 0b0000,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.172 FSQRT (scalar)
    pub const Fsqrt = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        opc: u2 = 0b11,
        decoded17: u4 = 0b0000,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.69 FCVT
    ///
    /// Precision conversion: `opc` is the destination type, `ftype` the
    /// source type.
    pub const Fcvt = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        opc: Ftype,
        decoded17: u4 = 0b0001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.162 FRINTN (scalar)
    pub const Frintn = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .n,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.164 FRINTP (scalar)
    pub const Frintp = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .p,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.160 FRINTM (scalar)
    pub const Frintm = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .m,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.168 FRINTZ (scalar)
    pub const Frintz = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .z,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.156 FRINTA (scalar)
    pub const Frinta = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .a,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.166 FRINTX (scalar)
    pub const Frintx = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .x,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// C7.2.158 FRINTI (scalar)
    pub const Frinti = packed struct {
        Rd: Register.Encoded,
        Rn: Register.Encoded,
        decoded10: u5 = 0b10000,
        rmode: Rmode = .i,
        decoded18: u3 = 0b001,
        decoded21: u1 = 0b1,
        ftype: Ftype,
        decoded24: u5 = 0b11110,
        S: bool = false,
        decoded30: u1 = 0b0,
        M: u1 = 0b0,
    };

    /// Rounding mode for the FRINT* family. 0b101 is unused, hence the
    /// non-exhaustive enum.
    pub const Rmode = enum(u3) {
        /// to nearest with ties to even
        n = 0b000,
        /// toward plus infinity
        p = 0b001,
        /// toward minus infinity
        m = 0b010,
        /// toward zero
        z = 0b011,
        /// to nearest with ties to away
        a = 0b100,
        /// exact, using current rounding mode
        x = 0b110,
        /// using current rounding mode
        i = 0b111,
        _,
    };
};
+
+ /// Floating-point compare
+ pub const FloatCompare = packed union {
+ group: @This().Group,
+ fcmp: Fcmp,
+ fcmpe: Fcmpe,
+
+ // Generic view of the group: `opcode2` (bits 0-4) spans what the
+ // FCMP/FCMPE views below split into decoded0 + opc0 + opc1, and
+ // `op` sits at bits 14-15.
+ pub const Group = packed struct {
+ opcode2: u5,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b1000,
+ op: u2,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.66 FCMP
+ pub const Fcmp = packed struct {
+ decoded0: u3 = 0b000,
+ opc0: Opc0,
+ opc1: u1 = 0b0,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b1000,
+ op: u2 = 0b00,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.67 FCMPE
+ /// Identical to FCMP except opc1 = 1 (per C7.2.67).
+ pub const Fcmpe = packed struct {
+ decoded0: u3 = 0b000,
+ opc0: Opc0,
+ opc1: u1 = 0b1,
+ Rn: Register.Encoded,
+ decoded10: u4 = 0b1000,
+ op: u2 = 0b00,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// Second-operand selector: compare against Rm, or against zero.
+ pub const Opc0 = enum(u1) {
+ register = 0b00,
+ zero = 0b01,
+ };
+ };
+
+ /// Floating-point immediate
+ pub const FloatImmediate = packed union {
+ group: @This().Group,
+ fmov: Fmov,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5,
+ decoded10: u3 = 0b100,
+ imm8: u8,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.132 FMOV (scalar, immediate)
+ /// `imm8` (bits 13-20) is the 8-bit encoded FP constant described in
+ /// C7.2.132; `imm5` is fixed to zero for this instruction.
+ pub const Fmov = packed struct {
+ Rd: Register.Encoded,
+ imm5: u5 = 0b00000,
+ decoded10: u3 = 0b100,
+ imm8: u8,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+ };
+
+ /// Floating-point data-processing (2 source)
+ // All members share one layout: bits 0-4 Rd, 5-9 Rn, 10-11 fixed 0b10,
+ // 12-15 opcode, 16-20 Rm, 21 fixed 1, 22-23 ftype, 24-28 fixed 0b11110,
+ // 29 S, 30 fixed 0, 31 M. The members differ only in the opcode default.
+ pub const FloatDataProcessingTwoSource = packed union {
+ group: @This().Group,
+ fmul: Fmul,
+ fdiv: Fdiv,
+ fadd: Fadd,
+ fsub: Fsub,
+ fmax: Fmax,
+ fmin: Fmin,
+ fmaxnm: Fmaxnm,
+ fminnm: Fminnm,
+ fnmul: Fnmul,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.136 FMUL (scalar)
+ pub const Fmul = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmul,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.98 FDIV (scalar)
+ pub const Fdiv = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fdiv,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.50 FADD (scalar)
+ pub const Fadd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fadd,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.174 FSUB (scalar)
+ pub const Fsub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fsub,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.102 FMAX (scalar)
+ pub const Fmax = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmax,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.112 FMIN (scalar)
+ pub const Fmin = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmin,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.104 FMAXNM (scalar)
+ pub const Fmaxnm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fmaxnm,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.114 FMINNM (scalar)
+ pub const Fminnm = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fminnm,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.143 FNMUL (scalar)
+ pub const Fnmul = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ decoded10: u2 = 0b10,
+ opcode: Opcode = .fnmul,
+ Rm: Register.Encoded,
+ decoded21: u1 = 0b1,
+ ftype: Ftype,
+ decoded24: u5 = 0b11110,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// Operation selector at bits 12-15; non-exhaustive since 0b1001
+ /// upward is not named here.
+ pub const Opcode = enum(u4) {
+ fmul = 0b0000,
+ fdiv = 0b0001,
+ fadd = 0b0010,
+ fsub = 0b0011,
+ fmax = 0b0100,
+ fmin = 0b0101,
+ fmaxnm = 0b0110,
+ fminnm = 0b0111,
+ fnmul = 0b1000,
+ _,
+ };
+ };
+
+ /// Floating-point data-processing (3 source)
+ // Fused multiply-add family. `Ra` (bits 10-14) is the third source
+ // (the addend); the o0 (add/sub) and o1 (fm/fnm) bits combine into the
+ // four FMADD/FMSUB/FNMADD/FNMSUB encodings below.
+ pub const FloatDataProcessingThreeSource = packed union {
+ group: @This().Group,
+ fmadd: Fmadd,
+ fmsub: Fmsub,
+ fnmadd: Fnmadd,
+ fnmsub: Fnmsub,
+
+ pub const Group = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp,
+ Rm: Register.Encoded,
+ o1: u1,
+ ptype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool,
+ decoded30: u1 = 0b0,
+ M: u1,
+ };
+
+ /// C7.2.100 FMADD
+ pub const Fmadd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ o1: O1 = .fm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.133 FMSUB
+ pub const Fmsub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ o1: O1 = .fm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.141 FNMADD
+ pub const Fnmadd = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .add,
+ Rm: Register.Encoded,
+ o1: O1 = .fnm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// C7.2.142 FNMSUB
+ pub const Fnmsub = packed struct {
+ Rd: Register.Encoded,
+ Rn: Register.Encoded,
+ Ra: Register.Encoded,
+ o0: AddSubtractOp = .sub,
+ Rm: Register.Encoded,
+ o1: O1 = .fnm,
+ ftype: Ftype,
+ decoded24: u5 = 0b11111,
+ S: bool = false,
+ decoded30: u1 = 0b0,
+ M: u1 = 0b0,
+ };
+
+ /// Selects the plain (fm) vs negated (fnm) encoding family.
+ pub const O1 = enum(u1) {
+ fm = 0b0,
+ fnm = 0b1,
+ };
+ };
+
+ /// SIMD Q bit: selects the 64-bit (`double`) or 128-bit (`quad`)
+ /// vector register width.
+ pub const Q = enum(u1) {
+ double = 0b0,
+ quad = 0b1,
+ };
+
+ /// Two-bit element-size field shared by SIMD encodings.
+ pub const Size = enum(u2) {
+ byte = 0b00,
+ half = 0b01,
+ single = 0b10,
+ double = 0b11,
+
+ /// One-to-one rename into the register-level `VectorSize` enum.
+ pub fn toVectorSize(s: Size) Register.VectorSize {
+ return switch (s) {
+ .byte => .byte,
+ .half => .half,
+ .single => .single,
+ .double => .double,
+ };
+ }
+
+ /// Inverse of `toVectorSize`.
+ pub fn fromVectorSize(vs: Register.VectorSize) Size {
+ return switch (vs) {
+ .byte => .byte,
+ .half => .half,
+ .single => .single,
+ .double => .double,
+ };
+ }
+ };
+
+ /// FP precision field (bits 22-23 in the scalar FP encodings above).
+ /// Note the non-monotonic encoding: double is 0b01 and half is 0b11.
+ pub const Ftype = enum(u2) {
+ single = 0b00,
+ double = 0b01,
+ quad = 0b10,
+ half = 0b11,
+ };
+ };
+
+ /// Single op bit shared by add/subtract-style encodings.
+ pub const AddSubtractOp = enum(u1) {
+ add = 0b0,
+ sub = 0b1,
+ };
+
+ /// opc field shared by the logical (AND/ORR/EOR/ANDS) encodings.
+ pub const LogicalOpc = enum(u2) {
+ @"and" = 0b00,
+ orr = 0b01,
+ eor = 0b10,
+ ands = 0b11,
+ };
+
+ /// Result of top-level decode: which major A64 encoding group an
+ /// instruction word belongs to. Each payload is a reinterpretation of
+ /// the same 32 bits.
+ pub const Decoded = union(enum) {
+ unallocated,
+ reserved: Reserved,
+ sme: Sme,
+ sve: Sve,
+ data_processing_immediate: DataProcessingImmediate,
+ branch_exception_generating_system: BranchExceptionGeneratingSystem,
+ load_store: LoadStore,
+ data_processing_register: DataProcessingRegister,
+ data_processing_vector: DataProcessingVector,
+ };
+ /// Classifies `inst` by its group op0/op1 fields into a major encoding
+ /// group (top-level A64 decode table); no payload bits are modified.
+ pub fn decode(inst: @This()) @This().Decoded {
+ return switch (inst.group.op1) {
+ 0b0000 => switch (inst.group.op0) {
+ 0b0 => .{ .reserved = inst.reserved },
+ 0b1 => .{ .sme = inst.sme },
+ },
+ 0b0001 => .unallocated,
+ 0b0010 => .{ .sve = inst.sve },
+ 0b0011 => .unallocated,
+ 0b1000, 0b1001 => .{ .data_processing_immediate = inst.data_processing_immediate },
+ 0b1010, 0b1011 => .{ .branch_exception_generating_system = inst.branch_exception_generating_system },
+ 0b0100, 0b0110, 0b1100, 0b1110 => .{ .load_store = inst.load_store },
+ 0b0101, 0b1101 => .{ .data_processing_register = inst.data_processing_register },
+ 0b0111, 0b1111 => .{ .data_processing_vector = inst.data_processing_vector },
+ };
+ }
+
+ /// C6.2.1 ADC
+ /// Add with carry. All three registers must share one integer width.
+ pub fn adc(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .adc = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.2 ADCS
+ /// Flag-setting variant of `adc`; same operand constraints.
+ pub fn adcs(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .adcs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.3 ADD (extended register)
+ /// C6.2.4 ADD (immediate)
+ /// C6.2.5 ADD (shifted register)
+ /// The convenience forms (`register`, `immediate`, `extended_register`,
+ /// `shifted_register`) re-dispatch via `continue :form` to the matching
+ /// `*_explicit`/`shifted_immediate` case, which emits the encoding.
+ /// The plain `register` form chooses the extended-register encoding when
+ /// SP is involved (only that encoding can address SP), else shifted.
+ pub fn add(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .add = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // The extension's left-shift amount is limited to 0-4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .add = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .add = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only has 5 significant shift-amount bits.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ // ROR is not a valid shift for add/subtract.
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C7.2.4 ADDP (scalar)
+ /// C7.2.5 ADDP (vector)
+ /// Scalar form requires a double scalar destination and a .2d source;
+ /// the vector form takes a third register with a matching arrangement
+ /// (anything but .1d).
+ pub fn addp(d: Register, n: Register, form: union(enum) {
+ scalar,
+ vector: Register,
+ }) Instruction {
+ switch (form) {
+ .scalar => {
+ assert(d.format.scalar == .double and n.format.vector == .@"2d");
+ return .{ .data_processing_vector = .{ .simd_scalar_pairwise = .{
+ .addp = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .size = .double,
+ },
+ } } };
+ },
+ .vector => |m| {
+ const arrangement = d.format.vector;
+ assert(arrangement != .@"1d" and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .addp = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .size = arrangement.elemSize(),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.7 ADDS (extended register)
+ /// C6.2.8 ADDS (immediate)
+ /// C6.2.9 ADDS (shifted register)
+ /// Flag-setting counterpart of `add`, with the same form normalization
+ /// via `continue :form`. Unlike `add`, Rd is encoded without the SP
+ /// option (only Rn may be SP in the extended/immediate forms here).
+ pub fn adds(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .adds = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // The extension's left-shift amount is limited to 0-4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .adds = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .adds = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only has 5 significant shift-amount bits.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ // ROR is not a valid shift for add/subtract.
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C7.2.6 ADDV
+ /// Across-lanes add of all elements of `n` into scalar `d`; only
+ /// arrangements with more than two lanes are accepted, and `d` must
+ /// match the element size.
+ pub fn addv(d: Register, n: Register) Instruction {
+ const arrangement = n.format.vector;
+ assert(arrangement.len() > 2 and d.format.scalar == arrangement.elemSize().toVectorSize());
+ return .{ .data_processing_vector = .{ .simd_across_lanes = .{
+ .addv = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .size = arrangement.elemSize(),
+ .Q = arrangement.size(),
+ },
+ } } };
+ }
+ /// C6.2.10 ADR
+ /// PC-relative address; `label` is a signed byte offset, split into the
+ /// immhi:immlo fields (immlo holds the low two bits).
+ pub fn adr(d: Register, label: i21) Instruction {
+ assert(d.format.integer == .doubleword);
+ return .{ .data_processing_immediate = .{ .pc_relative_addressing = .{
+ .adr = .{
+ .Rd = d.alias.encode(.{}),
+ .immhi = @intCast(label >> 2),
+ .immlo = @truncate(@as(u21, @bitCast(label))),
+ },
+ } } };
+ }
+ /// C6.2.11 ADRP
+ /// Page-relative address; `label` must be a multiple of 4096
+ /// (`@shrExact` panics otherwise in safe builds).
+ pub fn adrp(d: Register, label: i33) Instruction {
+ assert(d.format.integer == .doubleword);
+ const imm: i21 = @intCast(@shrExact(label, 12));
+ return .{ .data_processing_immediate = .{ .pc_relative_addressing = .{
+ .adrp = .{
+ .Rd = d.alias.encode(.{}),
+ .immhi = @intCast(imm >> 2),
+ .immlo = @truncate(@as(u21, @bitCast(imm))),
+ },
+ } } };
+ }
+ /// C6.2.12 AND (immediate)
+ /// C6.2.13 AND (shifted register)
+ /// C7.2.11 AND (vector)
+ /// Dispatches on the destination's format: integer destinations take
+ /// the immediate/shifted-register forms (normalized via `continue
+ /// :form`), vector destinations only accept the `register` form and a
+ /// byte-element arrangement.
+ pub fn @"and"(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .@"and" = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .@"and" = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only has 5 significant shift-amount bits.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| {
+ const m = form.register;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .@"and" = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.14 ANDS (immediate)
+ /// C6.2.15 ANDS (shifted register)
+ /// Flag-setting AND; unlike `@"and"`, Rd is encoded without the SP
+ /// option, and there is no vector form.
+ pub fn ands(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .ands = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .ands = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only has 5 significant shift-amount bits.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.18 ASRV
+ /// Arithmetic shift right by a register-held amount; all registers
+ /// must share one integer width.
+ pub fn asrv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .asrv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.25 B
+ /// Unconditional branch; `label` is a byte offset that must be a
+ /// multiple of 4 (`@shrExact`), encoded as imm26.
+ pub fn b(label: i28) Instruction {
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_immediate = .{
+ .b = .{ .imm26 = @intCast(@shrExact(label, 2)) },
+ } } };
+ }
+ /// C6.2.26 B.cond
+ /// Conditional branch; 4-byte-aligned `label`, encoded as imm19.
+ pub fn @"b."(cond: ConditionCode, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .conditional_branch_immediate = .{
+ .b = .{
+ .cond = cond,
+ .imm19 = @intCast(@shrExact(label, 2)),
+ },
+ } } };
+ }
+ /// C6.2.27 BC.cond
+ /// Same operands/encoding shape as B.cond, different member.
+ pub fn @"bc."(cond: ConditionCode, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .conditional_branch_immediate = .{
+ .bc = .{
+ .cond = cond,
+ .imm19 = @intCast(@shrExact(label, 2)),
+ },
+ } } };
+ }
+ /// C6.2.30 BFM
+ /// Bitfield move; `bitmask` must satisfy `validBitfield` for the
+ /// chosen register width.
+ pub fn bfm(d: Register, n: Register, bitmask: DataProcessingImmediate.Bitmask) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and bitmask.validBitfield(sf));
+ return .{ .data_processing_immediate = .{ .bitfield = .{
+ .bfm = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.32 BIC (shifted register)
+ /// C7.2.20 BIC (vector, immediate)
+ /// C7.2.21 BIC (vector, register)
+ /// Integer destinations take the shifted-register forms; vector
+ /// destinations take either the shifted-immediate form (n must alias d)
+ /// or the register form (byte-element arrangements only).
+ pub fn bic(d: Register, n: Register, form: union(enum) {
+ shifted_immediate: struct { immediate: u8, lsl: u5 = 0 },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ else => unreachable,
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .bic = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only has 5 significant shift-amount bits.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| switch (form) {
+ else => unreachable,
+ .shifted_immediate => |shifted_immediate| {
+ assert(n.alias == d.alias and n.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .bic = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm5 = @truncate(shifted_immediate.immediate >> 0),
+ // cmode packs the element size and the shift amount;
+ // lsl must be a multiple of 8 (`@shrExact`).
+ .cmode = switch (arrangement) {
+ else => unreachable,
+ .@"4h", .@"8h" => @as(u3, 0b100) |
+ @as(u3, @as(u1, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ .@"2s", .@"4s" => @as(u3, 0b000) |
+ @as(u3, @as(u2, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ },
+ .imm3 = @intCast(shifted_immediate.immediate >> 5),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ .register => |m| {
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .bic = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ },
+ }
+ }
+ /// C6.2.33 BICS (shifted register)
+ /// Flag-setting BIC; integer shifted-register forms only.
+ pub fn bics(d: Register, n: Register, form: union(enum) {
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .bics = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form only has 5 significant shift-amount bits.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.34 BL
+ /// Branch with link; 4-byte-aligned byte offset (`@shrExact`).
+ pub fn bl(label: i28) Instruction {
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_immediate = .{
+ .bl = .{ .imm26 = @intCast(@shrExact(label, 2)) },
+ } } };
+ }
+ /// C6.2.35 BLR
+ /// Branch with link to register; `n` must be a 64-bit integer register.
+ pub fn blr(n: Register) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_register = .{
+ .blr = .{ .Rn = n.alias.encode(.{}) },
+ } } };
+ }
+ /// C6.2.37 BR
+ /// Branch to register; `n` must be a 64-bit integer register.
+ pub fn br(n: Register) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_register = .{
+ .br = .{ .Rn = n.alias.encode(.{}) },
+ } } };
+ }
+ /// C6.2.40 BRK
+ /// Breakpoint with a 16-bit immediate comment field.
+ pub fn brk(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .brk = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.46 CBNZ
+ /// Compare-and-branch on nonzero; 4-byte-aligned `label` as imm19,
+ /// width taken from `t`.
+ pub fn cbnz(t: Register, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .compare_branch_immediate = .{
+ .cbnz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm19 = @intCast(@shrExact(label, 2)),
+ .sf = t.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.47 CBZ
+ /// Compare-and-branch on zero; same operand shape as `cbnz`.
+ pub fn cbz(t: Register, label: i21) Instruction {
+ return .{ .branch_exception_generating_system = .{ .compare_branch_immediate = .{
+ .cbz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm19 = @intCast(@shrExact(label, 2)),
+ .sf = t.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.48 CCMN (immediate)
+ /// C6.2.49 CCMN (register)
+ /// Conditional compare negative: second operand is either a same-width
+ /// register or an unsigned 5-bit immediate; `nzcv` is the flag value
+ /// used when `cond` fails.
+ pub fn ccmn(
+ n: Register,
+ form: union(enum) { register: Register, immediate: u5 },
+ nzcv: DataProcessingRegister.Nzcv,
+ cond: ConditionCode,
+ ) Instruction {
+ const sf = n.format.integer;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_compare_register = .{
+ .ccmn = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .immediate => |imm| return .{ .data_processing_register = .{ .conditional_compare_immediate = .{
+ .ccmn = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .imm5 = imm,
+ .sf = sf,
+ },
+ } } },
+ }
+ }
+ /// C6.2.50 CCMP (immediate)
+ /// C6.2.51 CCMP (register)
+ /// Conditional compare; same operand shape as `ccmn`.
+ pub fn ccmp(
+ n: Register,
+ form: union(enum) { register: Register, immediate: u5 },
+ nzcv: DataProcessingRegister.Nzcv,
+ cond: ConditionCode,
+ ) Instruction {
+ const sf = n.format.integer;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_compare_register = .{
+ .ccmp = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .immediate => |imm| return .{ .data_processing_register = .{ .conditional_compare_immediate = .{
+ .ccmp = .{
+ .nzcv = nzcv,
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .imm5 = imm,
+ .sf = sf,
+ },
+ } } },
+ }
+ }
+ /// C6.2.56 CLREX
+ /// Clears the local monitor's exclusive-access record; `imm` fills the CRm field.
+ pub fn clrex(imm: u4) Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .clrex = .{
+ .CRm = imm,
+ },
+ } } };
+ }
+ /// C6.2.58 CLZ
+ /// Count leading zero bits of Rn into Rd; both registers must share the same width.
+ pub fn clz(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .clz = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.38 CNT
+ /// Vector population count per byte element; only byte arrangements (8B/16B) are
+ /// accepted, and both operands must share the arrangement.
+ pub fn cnt(d: Register, n: Register) Instruction {
+ const arrangement = d.format.vector;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_two_register_miscellaneous = .{
+ .cnt = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .size = arrangement.elemSize(),
+ .Q = arrangement.size(),
+ },
+ } } };
+ }
+ /// C6.2.103 CSEL
+ /// Conditional select: Rd = cond ? Rn : Rm. All three registers share one width.
+ pub fn csel(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csel = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.106 CSINC
+ /// Conditional select increment: Rd = cond ? Rn : Rm + 1.
+ pub fn csinc(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csinc = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.107 CSINV
+ /// Conditional select invert: Rd = cond ? Rn : ~Rm.
+ pub fn csinv(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csinv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.108 CSNEG
+ /// Conditional select negate: Rd = cond ? Rn : -Rm.
+ pub fn csneg(d: Register, n: Register, m: Register, cond: ConditionCode) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .conditional_select = .{
+ .csneg = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .cond = cond,
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.110 DCPS1
+ /// Debug change PE state to EL1; `imm` fills the 16-bit comment field.
+ pub fn dcps1(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .dcps1 = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.111 DCPS2
+ /// Debug change PE state to EL2; `imm` fills the 16-bit comment field.
+ pub fn dcps2(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .dcps2 = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.112 DCPS3
+ /// Debug change PE state to EL3; `imm` fills the 16-bit comment field.
+ pub fn dcps3(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .dcps3 = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.116 DSB
+ /// Data synchronization barrier; `option` (shareability domain / access types) is
+ /// encoded in the CRm field.
+ pub fn dsb(option: BranchExceptionGeneratingSystem.Barriers.Option) Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .dsb = .{
+ .CRm = option,
+ },
+ } } };
+ }
+ /// C6.2.118 EON (shifted register)
+ /// Bitwise exclusive-OR NOT: Rd = Rn ^ NOT(shifted Rm).
+ pub fn eon(d: Register, n: Register, form: union(enum) {
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ // Labeled switch: the convenience forms are rewritten (via `continue :form`) into
+ // `.shifted_register_explicit`, which performs the actual encoding.
+ form: switch (form) {
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .eon = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form: the u5 cast asserts the shift amount is < 32.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ // Split the Shift union into its op tag and payload amount; the switch covers
+ // all four shift ops (`.none` presumably resolves to one of them — zero-amount
+ // lsl — TODO confirm against DataProcessingRegister.Shift).
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.119 EOR (immediate)
+ /// C6.2.120 EOR (shifted register)
+ /// C7.2.41 EOR (vector)
+ /// Bitwise exclusive OR; the destination format selects between the integer forms
+ /// (immediate / shifted register) and the vector form.
+ pub fn eor(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ // The bitmask immediate must be encodable at this register width.
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .eor = .{
+ // Rd of the logical-immediate form may name SP.
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ // Convenience forms normalize to `.shifted_register_explicit` via the
+ // labeled switch.
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .eor = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form: the u5 cast asserts the shift amount is < 32.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| {
+ // Vector EOR: only the `.register` form applies; byte arrangements only.
+ const m = form.register;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .eor = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.124 EXTR
+ /// Extract register: Rd = bits [lsb + width - 1 : lsb] of the concatenation Rn:Rm.
+ pub fn extr(d: Register, n: Register, m: Register, lsb: u6) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_immediate = .{ .extract = .{
+ .extr = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // 32-bit form: the u5 cast asserts lsb < 32.
+ .imms = switch (sf) {
+ .word => @as(u5, @intCast(lsb)),
+ .doubleword => @as(u6, @intCast(lsb)),
+ },
+ .Rm = m.alias.encode(.{}),
+ .N = sf,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.46 FABS (scalar)
+ /// Floating-point absolute value: Rd = |Rn|. Operands share one precision
+ /// (half/single/double).
+ pub fn fabs(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fabs = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.50 FADD (scalar)
+ /// Floating-point add: Rd = Rn + Rm. All operands share one precision.
+ pub fn fadd(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fadd = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.66 FCMP
+ /// Floating-point compare of Rn against a register or +0.0, setting NZCV.
+ /// The zero form encodes Rm as register 0.
+ pub fn fcmp(n: Register, form: union(enum) { register: Register, zero }) Instruction {
+ const ftype = n.format.scalar;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmp = .{
+ .opc0 = .register,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ },
+ .zero => return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmp = .{
+ .opc0 = .register,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = @enumFromInt(0b00000),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } },
+ }
+ }
+ /// C7.2.67 FCMPE
+ /// Floating-point compare (signaling variant of FCMP; raises Invalid Operation on
+ /// any NaN operand) against a register or +0.0.
+ pub fn fcmpe(n: Register, form: union(enum) { register: Register, zero }) Instruction {
+ const ftype = n.format.scalar;
+ switch (form) {
+ .register => |m| {
+ assert(m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmpe = .{
+ .opc0 = .zero,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ },
+ .zero => return .{ .data_processing_vector = .{ .float_compare = .{
+ .fcmpe = .{
+ .opc0 = .zero,
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = @enumFromInt(0b00000),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } },
+ }
+ }
+ /// C7.2.69 FCVT
+ /// Floating-point precision conversion; source and destination precisions must
+ /// differ. `opc` carries the destination precision, `ftype` the source.
+ pub fn fcvt(d: Register, n: Register) Instruction {
+ assert(d.format.scalar != n.format.scalar);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fcvt = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .opc = switch (d.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.71 FCVTAS (scalar)
+ /// FP to signed integer, rounding to nearest with ties away from zero.
+ pub fn fcvtas(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtas = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.73 FCVTAU (scalar)
+ /// FP to unsigned integer, rounding to nearest with ties away from zero.
+ pub fn fcvtau(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtau = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.76 FCVTMS (scalar)
+ /// FP to signed integer, rounding toward minus infinity.
+ pub fn fcvtms(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtms = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.78 FCVTMU (scalar)
+ /// FP to unsigned integer, rounding toward minus infinity.
+ pub fn fcvtmu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtmu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.81 FCVTNS (scalar)
+ /// FP to signed integer, rounding to nearest with ties to even.
+ pub fn fcvtns(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtns = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.83 FCVTNU (scalar)
+ /// FP to unsigned integer, rounding to nearest with ties to even.
+ pub fn fcvtnu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtnu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.85 FCVTPS (scalar)
+ /// FP to signed integer, rounding toward plus infinity.
+ pub fn fcvtps(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtps = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.87 FCVTPU (scalar)
+ /// FP to unsigned integer, rounding toward plus infinity.
+ pub fn fcvtpu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtpu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.92 FCVTZS (scalar, integer)
+ /// FP to signed integer, rounding toward zero (C-style truncation).
+ pub fn fcvtzs(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtzs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.96 FCVTZU (scalar, integer)
+ /// FP to unsigned integer, rounding toward zero (C-style truncation).
+ pub fn fcvtzu(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fcvtzu = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (n.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = d.format.integer,
+ },
+ } } };
+ }
+ /// C7.2.98 FDIV (scalar)
+ /// Floating-point divide: Rd = Rn / Rm. All operands share one precision.
+ pub fn fdiv(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fdiv = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.99 FJCVTZS
+ /// JavaScript convert: double in Rn to a 32-bit signed integer in Rd with
+ /// JavaScript (modulo-2^32) out-of-range semantics; operand formats are fixed.
+ pub fn fjcvtzs(d: Register, n: Register) Instruction {
+ assert(d.format.integer == .word);
+ assert(n.format.scalar == .double);
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fjcvtzs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ },
+ } } };
+ }
+ /// C7.2.100 FMADD
+ /// Fused multiply-add: Rd = Ra + Rn * Rm. All four operands share one precision.
+ pub fn fmadd(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fmadd = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.102 FMAX (scalar)
+ /// Floating-point maximum of Rn and Rm (NaN-propagating).
+ pub fn fmax(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmax = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.104 FMAXNM (scalar)
+ /// Floating-point maxNum: like FMAX but prefers the number when one operand is a
+ /// quiet NaN (IEEE 754-2008 maxNum).
+ pub fn fmaxnm(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmaxnm = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.112 FMIN (scalar)
+ /// Floating-point minimum of Rn and Rm (NaN-propagating).
+ pub fn fmin(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmin = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.114 FMINNM (scalar)
+ /// Floating-point minNum: like FMIN but prefers the number when one operand is a
+ /// quiet NaN (IEEE 754-2008 minNum).
+ pub fn fminnm(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fminnm = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.129 FMOV (vector, immediate)
+ /// C7.2.130 FMOV (register)
+ /// C7.2.131 FMOV (general)
+ /// C7.2.132 FMOV (scalar, immediate)
+ /// Floating-point move. The immediate form packs an f16 into the 8-bit A64 FP
+ /// immediate; the register form dispatches on the destination/source formats to
+ /// pick between general<->FP transfers, FP<->FP copies, and vector-element moves.
+ pub fn fmov(d: Register, form: union(enum) { immediate: f16, register: Register }) Instruction {
+ switch (form) {
+ .immediate => |immediate| {
+ const repr: std.math.FloatRepr(f16) = @bitCast(immediate);
+ // Pack sign, 3-bit biased exponent, and 4-bit mantissa into imm8.
+ const imm: u8 = @bitCast(@as(packed struct(u8) {
+ mantissa: u4,
+ exponent: i3,
+ sign: std.math.Sign,
+ }, .{
+ // shrExact asserts the value is exactly representable in 4 mantissa
+ // bits; the intCasts assert the exponent fits the imm8 range.
+ .mantissa = @intCast(@shrExact(repr.mantissa, 6)),
+ .exponent = @intCast(repr.exponent.unbias() - 1),
+ .sign = repr.sign,
+ }));
+ switch (d.format) {
+ else => unreachable,
+ .scalar => |ftype| return .{ .data_processing_vector = .{ .float_immediate = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm8 = imm,
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } },
+ .vector => |arrangement| {
+ assert(arrangement.len() > 1 and arrangement.elemSize() != .byte);
+ // Vector form splits imm8 across the imm5 (low) and imm3 (high) fields.
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm5 = @truncate(imm >> 0),
+ .imm3 = @intCast(imm >> 5),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ },
+ .register => |n| switch (d.format) {
+ else => unreachable,
+ // FP -> general register transfer; half pairs with either width, while
+ // single requires a 32-bit and double a 64-bit destination.
+ .integer => |sf| switch (n.format) {
+ else => unreachable,
+ .scalar => |ftype| {
+ switch (ftype) {
+ else => unreachable,
+ .half => {},
+ .single => assert(sf == .word),
+ .double => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .opcode = .float_to_integer,
+ .rmode = .@"0",
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ // Vector element -> general register; only the upper-half move
+ // (double element, index 1) is representable here.
+ .element => |element| return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .opcode = .float_to_integer,
+ .rmode = switch (element.index) {
+ else => unreachable,
+ 1 => .@"1",
+ },
+ .ftype = switch (element.size) {
+ else => unreachable,
+ .double => .quad,
+ },
+ .sf = sf,
+ },
+ } } },
+ },
+ .scalar => |ftype| switch (n.format) {
+ else => unreachable,
+ // General register -> FP register transfer; width pairing as above.
+ .integer => {
+ const sf = n.format.integer;
+ switch (ftype) {
+ else => unreachable,
+ .half => {},
+ .single => assert(sf == .word),
+ .double => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ .opcode = .integer_to_float,
+ .rmode = .@"0",
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ // FP -> FP copy within one precision.
+ .scalar => {
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ },
+ },
+ // General register -> vector element (upper-half move only, as above).
+ .element => |element| switch (n.format) {
+ else => unreachable,
+ .integer => |sf| return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .fmov = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ .opcode = .integer_to_float,
+ .rmode = switch (element.index) {
+ else => unreachable,
+ 1 => .@"1",
+ },
+ .ftype = switch (element.size) {
+ else => unreachable,
+ .double => .quad,
+ },
+ .sf = sf,
+ },
+ } } },
+ },
+ },
+ }
+ }
+ /// C7.2.133 FMSUB
+ /// Fused multiply-subtract: Rd = Ra - Rn * Rm. All four operands share one precision.
+ pub fn fmsub(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fmsub = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.136 FMUL (scalar)
+ /// Floating-point multiply: Rd = Rn * Rm.
+ pub fn fmul(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fmul = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.140 FNEG (scalar)
+ /// Floating-point negate: Rd = -Rn.
+ pub fn fneg(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fneg = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.141 FNMADD
+ /// Fused negated multiply-add: Rd = -Ra - Rn * Rm.
+ pub fn fnmadd(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fnmadd = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.142 FNMSUB
+ /// Fused negated multiply-subtract: Rd = -Ra + Rn * Rm.
+ pub fn fnmsub(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype and a.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_three_source = .{
+ .fnmsub = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Ra = a.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.143 FNMUL (scalar)
+ /// Floating-point multiply-negate: Rd = -(Rn * Rm).
+ pub fn fnmul(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fnmul = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.156 FRINTA (scalar)
+ /// Round to integral FP value, ties away from zero.
+ pub fn frinta(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frinta = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.158 FRINTI (scalar)
+ /// Round to integral FP value using the current FPCR rounding mode.
+ pub fn frinti(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frinti = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.160 FRINTM (scalar)
+ /// Round to integral FP value, toward minus infinity.
+ pub fn frintm(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintm = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.162 FRINTN (scalar)
+ /// Round to integral FP value, ties to even.
+ pub fn frintn(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintn = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.164 FRINTP (scalar)
+ /// Round to integral FP value, toward plus infinity.
+ pub fn frintp(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintp = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.166 FRINTX (scalar)
+ /// Round to integral FP value using the current FPCR mode, raising Inexact when
+ /// the result differs from the input.
+ pub fn frintx(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintx = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.168 FRINTZ (scalar)
+ /// Round to integral FP value, toward zero.
+ pub fn frintz(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .frintz = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.172 FSQRT (scalar)
+ /// Floating-point square root: Rd = sqrt(Rn).
+ pub fn fsqrt(d: Register, n: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_one_source = .{
+ .fsqrt = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C7.2.174 FSUB (scalar)
+ /// Floating-point subtract: Rd = Rn - Rm. All operands share one precision.
+ pub fn fsub(d: Register, n: Register, m: Register) Instruction {
+ const ftype = d.format.scalar;
+ assert(n.format.scalar == ftype and m.format.scalar == ftype);
+ return .{ .data_processing_vector = .{ .float_data_processing_two_source = .{
+ .fsub = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .ftype = switch (ftype) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ },
+ } } };
+ }
+ /// C6.2.126 HINT
+ /// Hint instruction; imm[2:0] fills op2 and imm[6:3] fills CRm.
+ pub fn hint(imm: u7) Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .group = .{
+ .op2 = @truncate(imm >> 0),
+ .CRm = @intCast(imm >> 3),
+ },
+ } } };
+ }
+ /// C6.2.127 HLT
+ /// Halting breakpoint; `imm` is the 16-bit comment field.
+ pub fn hlt(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .hlt = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.128 HVC
+ /// Hypervisor call; `imm` is the 16-bit comment field.
+ pub fn hvc(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .hvc = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.131 ISB
+ /// Instruction synchronization barrier; `option` is encoded in the CRm field.
+ pub fn isb(option: BranchExceptionGeneratingSystem.Barriers.Option) Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .isb = .{
+ .CRm = option,
+ },
+ } } };
+ }
+ /// C6.2.164 LDP
+ /// C7.2.190 LDP (SIMD&FP)
+ /// Load a pair of registers. The integer vs SIMD&FP encoding is selected by
+ /// the format of t1 (t2 must match). imm7 stores the byte offset divided by
+ /// the access size; @shrExact asserts the offset is suitably aligned.
+ pub fn ldp(t1: Register, t2: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i10 },
+ pre_index: struct { base: Register, index: i10 },
+ signed_offset: struct { base: Register, offset: i10 = 0 },
+ base: Register,
+ }) Instruction {
+ switch (t1.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(t2.format.integer == sf);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_post_indexed = .{ .integer = .{
+ .ldp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ // Scale: 4 bytes for word, 8 for doubleword (2 + sf).
+ .imm7 = @intCast(@shrExact(post_index.index, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .signed_offset => |signed_offset| {
+ assert(signed_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_offset = .{ .integer = .{
+ .ldp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(signed_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_pre_indexed = .{ .integer = .{
+ .ldp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(pre_index.index, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ // A bare base register is sugar for a zero signed offset.
+ .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+ }
+ },
+ .scalar => |vs| {
+ assert(t2.format.scalar == vs);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_post_indexed = .{ .vector = .{
+ .ldp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ // Scale by the scalar element size instead of sf.
+ .imm7 = @intCast(@shrExact(post_index.index, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .signed_offset => |signed_offset| {
+ assert(signed_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_offset = .{ .vector = .{
+ .ldp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(signed_offset.offset, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_pre_indexed = .{ .vector = .{
+ .ldp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(pre_index.index, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+ }
+ },
+ }
+ }
+ /// C6.2.166 LDR (immediate)
+ /// C6.2.167 LDR (literal)
+ /// C6.2.168 LDR (register)
+ /// C7.2.191 LDR (immediate, SIMD&FP)
+ /// C7.2.192 LDR (literal, SIMD&FP)
+ /// C7.2.193 LDR (register, SIMD&FP)
+ /// Load register. The integer vs SIMD&FP encoding is selected by the format
+ /// of t. `.base` is sugar for a zero unsigned offset; `.extended_register`
+ /// resolves to the explicit option+amount form via the labeled switch.
+ pub fn ldr(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u16 = 0 },
+ base: Register,
+ literal: i21,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ switch (t.format) {
+ else => unreachable,
+ .integer => |sf| form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .sf = sf,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .sf = sf,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // imm12 is the byte offset scaled down by the access size.
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ // PC-relative literal: offset must be a multiple of 4.
+ .literal => |offset| return .{ .load_store = .{ .register_literal = .{ .integer = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{}),
+ .imm19 = @intCast(@shrExact(offset, 2)),
+ .sf = sf,
+ },
+ } } } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ // S selects the scaled index; only shift-by-0 or
+ // shift-by-access-size is encodable.
+ .S = switch (sf) {
+ .word => switch (extended_register_explicit.amount) {
+ 0 => false,
+ 2 => true,
+ else => unreachable,
+ },
+ .doubleword => switch (extended_register_explicit.amount) {
+ 0 => false,
+ 3 => true,
+ else => unreachable,
+ },
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ },
+ .scalar => |vs| form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .vector = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .vector = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .vector = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, @intFromEnum(vs))),
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ .literal => |offset| return .{ .load_store = .{ .register_literal = .{ .vector = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .imm19 = @intCast(@shrExact(offset, 2)),
+ .opc = .encode(vs),
+ },
+ } } } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .vector = .{
+ .ldr = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ // Shift amount must be 0 or log2(element size).
+ .S = switch (vs) {
+ else => unreachable,
+ .byte => switch (extended_register_explicit.amount) {
+ 0 => false,
+ else => unreachable,
+ },
+ .half => switch (extended_register_explicit.amount) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
+ .single => switch (extended_register_explicit.amount) {
+ 0 => false,
+ 2 => true,
+ else => unreachable,
+ },
+ .double => switch (extended_register_explicit.amount) {
+ 0 => false,
+ 3 => true,
+ else => unreachable,
+ },
+ .quad => switch (extended_register_explicit.amount) {
+ 0 => false,
+ 4 => true,
+ else => unreachable,
+ },
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ },
+ }
+ }
+ /// C6.2.170 LDRB (immediate)
+ /// C6.2.171 LDRB (register)
+ /// Load byte and zero-extend into a 32-bit register (t must be a W register).
+ /// Byte accesses are unscaled, so imm12 takes the offset directly and the
+ /// register-offset shift amount must be 0.
+ pub fn ldrb(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u12 = 0 },
+ base: Register,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ assert(t.format.integer == .word);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .ldrb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .ldrb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .ldrb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = unsigned_offset.offset,
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .ldrb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ .S = switch (extended_register_explicit.amount) {
+ 0 => false,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.172 LDRH (immediate)
+ /// C6.2.173 LDRH (register)
+ /// Load halfword and zero-extend into a 32-bit register (t must be a W
+ /// register). The unsigned offset is scaled by 2 (@shrExact asserts
+ /// alignment); the register-offset shift amount must be 0 or 1.
+ pub fn ldrh(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u13 = 0 },
+ base: Register,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ assert(t.format.integer == .word);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .ldrh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .ldrh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .ldrh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 1)),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .ldrh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ .S = switch (extended_register_explicit.amount) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.174 LDRSB (immediate)
+ /// C6.2.175 LDRSB (register)
+ /// Load byte and sign-extend. The destination may be a W or X register;
+ /// opc0 is the complement of sf, distinguishing the 32- vs 64-bit variants.
+ pub fn ldrsb(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u12 = 0 },
+ base: Register,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ const sf = t.format.integer;
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .ldrsb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .ldrsb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .ldrsb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = unsigned_offset.offset,
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .ldrsb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ .S = switch (extended_register_explicit.amount) {
+ 0 => false,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.176 LDRSH (immediate)
+ /// C6.2.177 LDRSH (register)
+ /// Load halfword and sign-extend. The destination may be a W or X register;
+ /// opc0 is the complement of sf. The unsigned offset is scaled by 2.
+ pub fn ldrsh(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u13 = 0 },
+ base: Register,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ const sf = t.format.integer;
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .ldrsh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .ldrsh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .ldrsh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 1)),
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .ldrsh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ .S = switch (extended_register_explicit.amount) {
+ 0 => false,
+ 1 => true,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ .opc0 = ~@intFromEnum(sf),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.178 LDRSW (immediate)
+ /// C6.2.179 LDRSW (literal)
+ /// C6.2.180 LDRSW (register)
+ /// Load word and sign-extend into a 64-bit register (t must be an X
+ /// register). The unsigned offset and literal are scaled by 4.
+ /// NOTE(review): unlike the other ldr* builders, only .pre_index and the
+ /// register-offset form wrap the payload in `.integer = .{ ... }`, while
+ /// .post_index, .unsigned_offset, and .literal initialize `.ldrsw` directly,
+ /// and `S` uses 0b0/0b1 rather than false/true — confirm the union layouts
+ /// really differ per addressing mode, otherwise this is an inconsistency.
+ pub fn ldrsw(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u14 = 0 },
+ base: Register,
+ literal: i21,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Integer.Option,
+ amount: LoadStore.RegisterRegisterOffset.Integer.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Integer.Extend,
+ },
+ }) Instruction {
+ assert(t.format.integer == .doubleword);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{
+ .ldrsw = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .ldrsw = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{
+ .ldrsw = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 2)),
+ },
+ } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ .literal => |offset| return .{ .load_store = .{ .register_literal = .{
+ .ldrsw = .{
+ .Rt = t.alias.encode(.{}),
+ .imm19 = @intCast(@shrExact(offset, 2)),
+ },
+ } } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .ldrsw = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ .S = switch (extended_register_explicit.amount) {
+ 0 => 0b0,
+ 2 => 0b1,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.202 LDUR
+ /// C7.2.194 LDUR (SIMD&FP)
+ /// Load register from an unscaled signed 9-bit byte offset; integer vs
+ /// SIMD&FP encoding is selected by the format of t.
+ pub fn ldur(t: Register, n: Register, simm: i9) Instruction {
+ assert(n.format.integer == .doubleword);
+ switch (t.format) {
+ else => unreachable,
+ .integer => |sf| return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .ldur = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .sf = sf,
+ },
+ } } } },
+ .scalar => |vs| return .{ .load_store = .{ .register_unscaled_immediate = .{ .vector = .{
+ .ldur = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } },
+ }
+ }
+ /// C6.2.203 LDURB
+ /// Load byte (zero-extend to W) from an unscaled signed 9-bit offset.
+ pub fn ldurb(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .word and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .ldurb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.204 LDURH
+ /// Load halfword (zero-extend to W) from an unscaled signed 9-bit offset.
+ pub fn ldurh(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .word and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .ldurh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.205 LDURSB
+ /// Load byte and sign-extend from an unscaled signed 9-bit offset; opc0 is
+ /// the complement of the destination width, selecting W vs X.
+ pub fn ldursb(t: Register, n: Register, simm: i9) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .ldursb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .opc0 = ~@intFromEnum(t.format.integer),
+ },
+ } } } };
+ }
+ /// C6.2.206 LDURSH
+ /// Load halfword and sign-extend from an unscaled signed 9-bit offset; opc0
+ /// is the complement of the destination width, selecting W vs X.
+ pub fn ldursh(t: Register, n: Register, simm: i9) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .ldursh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .opc0 = ~@intFromEnum(t.format.integer),
+ },
+ } } } };
+ }
+ /// C6.2.207 LDURSW
+ /// Load word and sign-extend into an X register from an unscaled signed
+ /// 9-bit offset.
+ pub fn ldursw(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .doubleword and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .ldursw = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.214 LSLV
+ /// Logical shift left by a register amount; all registers must share width.
+ pub fn lslv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .lslv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.217 LSRV
+ /// Logical shift right by a register amount; all registers must share width.
+ pub fn lsrv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .lsrv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.218 MADD
+ /// Multiply-add (d = a + n * m); all four registers must share width.
+ pub fn madd(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf and a.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .madd = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.204 MOVI
+ /// Vector move immediate. The 8-bit immediate is split into imm5 (low five
+ /// bits) and imm3 (high three bits); cmode encodes the shift kind/amount per
+ /// arrangement and op distinguishes the bit-replicate form. A .double scalar
+ /// destination is treated as the 1d arrangement.
+ pub fn movi(d: Register, imm8: u8, shift: union(enum) { lsl: u5, msl: u5, replicate }) Instruction {
+ const arrangement = switch (d.format) {
+ else => unreachable,
+ .scalar => |vs| switch (vs) {
+ else => unreachable,
+ .double => .@"1d",
+ },
+ .vector => |arrangement| switch (arrangement) {
+ .@"1d" => unreachable,
+ else => arrangement,
+ },
+ };
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .movi = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm5 = @truncate(imm8 >> 0),
+ // cmode: the shift amount must be a multiple of 8; @shrExact and
+ // the narrowing casts assert the legal range per element size.
+ .cmode = switch (shift) {
+ .lsl => |amount| switch (arrangement) {
+ else => unreachable,
+ .@"8b", .@"16b" => @as(u4, 0b1110) |
+ @as(u4, @as(u0, @intCast(@shrExact(amount, 3)))) << 1,
+ .@"4h", .@"8h" => @as(u4, 0b1000) |
+ @as(u4, @as(u1, @intCast(@shrExact(amount, 3)))) << 1,
+ .@"2s", .@"4s" => @as(u4, 0b0000) |
+ @as(u4, @as(u2, @intCast(@shrExact(amount, 3)))) << 1,
+ },
+ .msl => |amount| switch (arrangement) {
+ else => unreachable,
+ .@"2s", .@"4s" => @as(u4, 0b1100) |
+ @as(u4, @as(u1, @intCast(@shrExact(amount, 3) - 1))) << 0,
+ },
+ .replicate => switch (arrangement) {
+ else => unreachable,
+ .@"1d", .@"2d" => 0b1110,
+ },
+ },
+ .imm3 = @intCast(imm8 >> 5),
+ .op = switch (shift) {
+ .lsl, .msl => 0b0,
+ .replicate => 0b1,
+ },
+ .Q = arrangement.size(),
+ },
+ } } };
+ }
+ /// C6.2.225 MOVK
+ /// Move 16-bit immediate into a lane, keeping other bits. The assert rejects
+ /// hw shifts above 16 for 32-bit destinations.
+ pub fn movk(
+ d: Register,
+ imm: u16,
+ shift: struct { lsl: DataProcessingImmediate.MoveWideImmediate.Hw = .@"0" },
+ ) Instruction {
+ const sf = d.format.integer;
+ assert(sf == .doubleword or shift.lsl.sf() == .word);
+ return .{ .data_processing_immediate = .{ .move_wide_immediate = .{
+ .movk = .{
+ .Rd = d.alias.encode(.{}),
+ .imm16 = imm,
+ .hw = shift.lsl,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.226 MOVN
+ /// Move inverted shifted 16-bit immediate. The assert rejects hw shifts
+ /// above 16 for 32-bit destinations.
+ pub fn movn(
+ d: Register,
+ imm: u16,
+ shift: struct { lsl: DataProcessingImmediate.MoveWideImmediate.Hw = .@"0" },
+ ) Instruction {
+ const sf = d.format.integer;
+ assert(sf == .doubleword or shift.lsl.sf() == .word);
+ return .{ .data_processing_immediate = .{ .move_wide_immediate = .{
+ .movn = .{
+ .Rd = d.alias.encode(.{}),
+ .imm16 = imm,
+ .hw = shift.lsl,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.227 MOVZ
+ /// Move shifted 16-bit immediate, zeroing other bits. The assert rejects hw
+ /// shifts above 16 for 32-bit destinations.
+ pub fn movz(
+ d: Register,
+ imm: u16,
+ shift: struct { lsl: DataProcessingImmediate.MoveWideImmediate.Hw = .@"0" },
+ ) Instruction {
+ const sf = d.format.integer;
+ assert(sf == .doubleword or shift.lsl.sf() == .word);
+ return .{ .data_processing_immediate = .{ .move_wide_immediate = .{
+ .movz = .{
+ .Rd = d.alias.encode(.{}),
+ .imm16 = imm,
+ .hw = shift.lsl,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.228 MRS
+ /// Read a system register (op0:op1:CRn:CRm:op2) into an X register. Only
+ /// op0 of 0b10 or 0b11 is encodable; o0 stores op0 - 0b10 (the subtraction
+ /// and @intCast assert this).
+ pub fn mrs(t: Register, op0: u2, op1: u3, n: u4, m: u4, op2: u3) Instruction {
+ assert(t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system_register_move = .{
+ .mrs = .{
+ .Rt = t.alias.encode(.{}),
+ .op2 = op2,
+ .CRm = m,
+ .CRn = n,
+ .op1 = op1,
+ .o0 = @intCast(op0 - 0b10),
+ },
+ } } };
+ }
+ /// C6.2.230 MSR (register)
+ /// Write an X register to a system register (op0:op1:CRn:CRm:op2). Only op0
+ /// of 0b10 or 0b11 is encodable; o0 stores op0 - 0b10.
+ pub fn msr(op0: u2, op1: u3, n: u4, m: u4, op2: u3, t: Register) Instruction {
+ assert(t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system_register_move = .{
+ .msr = .{
+ .Rt = t.alias.encode(.{}),
+ .op2 = op2,
+ .CRm = m,
+ .CRn = n,
+ .op1 = op1,
+ .o0 = @intCast(op0 - 0b10),
+ },
+ } } };
+ }
+ /// C6.2.231 MSUB
+ /// Multiply-subtract (d = a - n * m); all four registers must share width.
+ pub fn msub(d: Register, n: Register, m: Register, a: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf and a.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .msub = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.238 NOP
+ /// No-operation hint.
+ pub fn nop() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .nop = .{},
+ } } };
+ }
+ /// C6.2.239 ORN (shifted register)
+ /// C7.2.211 ORN (vector)
+ /// Bitwise OR NOT. An integer destination accepts the register/shifted
+ /// forms; a vector destination accepts only the plain `.register` form (the
+ /// direct `form.register` access traps on any other variant) and requires a
+ /// byte element size.
+ pub fn orn(d: Register, n: Register, form: union(enum) {
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .orn = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // The narrowing cast asserts the amount fits the width.
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| {
+ const m = form.register;
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .orn = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ }
+ }
+ /// C6.2.240 ORR (immediate)
+ /// C6.2.241 ORR (shifted register)
+ /// C7.2.212 ORR (vector, immediate)
+ /// C7.2.213 ORR (vector, register)
+ /// Bitwise OR; the destination format and the chosen form select among four encodings.
+ pub fn orr(d: Register, n: Register, form: union(enum) {
+ immediate: DataProcessingImmediate.Bitmask,
+ shifted_immediate: struct { immediate: u8, lsl: u5 = 0 },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ switch (d.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .immediate => |bitmask| {
+ assert(bitmask.validImmediate(sf));
+ return .{ .data_processing_immediate = .{ .logical_immediate = .{
+ .orr = .{
+ // Immediate form allows SP as the destination.
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ },
+ // The shifted-immediate form only exists for vector destinations.
+ .shifted_immediate => unreachable,
+ .register => |register| continue :form .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .logical_shifted_register = .{
+ .orr = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // Shift amount must fit the register width (u5 for W, u6 for X).
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ .shift = shifted_register_explicit.shift,
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr, .ror => |amount| amount,
+ },
+ } },
+ }
+ },
+ .vector => |arrangement| switch (form) {
+ else => unreachable,
+ .shifted_immediate => |shifted_immediate| {
+ // Vector ORR (immediate) is destructive: source and destination must be the same register.
+ assert(n.alias == d.alias and n.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_modified_immediate = .{
+ .orr = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .imm5 = @truncate(shifted_immediate.immediate >> 0),
+ // lsl must be a multiple of 8; @shrExact traps otherwise. cmode's low bits carry lsl/8.
+ .cmode = switch (arrangement) {
+ else => unreachable,
+ .@"4h", .@"8h" => @as(u3, 0b100) |
+ @as(u3, @as(u1, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ .@"2s", .@"4s" => @as(u3, 0b000) |
+ @as(u3, @as(u2, @intCast(@shrExact(shifted_immediate.lsl, 3)))) << 0,
+ },
+ .imm3 = @intCast(shifted_immediate.immediate >> 5),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ .register => |m| {
+ // Vector ORR (register) is bitwise; only byte arrangements (.8b/.16b) are accepted.
+ assert(arrangement.elemSize() == .byte and n.format.vector == arrangement and m.format.vector == arrangement);
+ return .{ .data_processing_vector = .{ .simd_three_same = .{
+ .orr = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .Rm = m.alias.encode(.{ .V = true }),
+ .Q = arrangement.size(),
+ },
+ } } };
+ },
+ },
+ }
+ }
+ /// C6.2.247 PRFM (immediate)
+ /// C6.2.248 PRFM (literal)
+ /// C6.2.249 PRFM (register)
+ /// Prefetch memory hint; `prfop` selects the prefetch type/target/policy.
+ pub fn prfm(prfop: LoadStore.PrfOp, form: union(enum) {
+ unsigned_offset: struct { base: Register, offset: u15 = 0 },
+ base: Register,
+ literal: i21,
+ extended_register_explicit: struct {
+ base: Register,
+ index: Register,
+ option: LoadStore.RegisterRegisterOffset.Option,
+ amount: LoadStore.RegisterRegisterOffset.Extend.Amount,
+ },
+ extended_register: struct {
+ base: Register,
+ index: Register,
+ extend: LoadStore.RegisterRegisterOffset.Extend,
+ },
+ }) Instruction {
+ form: switch (form) {
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .prfm = .{
+ .prfop = prfop,
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // Offset is scaled by 8; @shrExact traps on a misaligned offset.
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 3)),
+ },
+ } } } };
+ },
+ // A bare base means unsigned offset zero.
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ // PC-relative form; the byte offset must be a multiple of 4.
+ .literal => |offset| return .{ .load_store = .{ .register_literal = .{ .integer = .{
+ .prfm = .{
+ .prfop = prfop,
+ .imm19 = @intCast(@shrExact(offset, 2)),
+ },
+ } } } },
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.base.format.integer == .doubleword and
+ extended_register_explicit.index.format.integer == extended_register_explicit.option.sf());
+ return .{ .load_store = .{ .register_register_offset = .{ .integer = .{
+ .prfm = .{
+ .prfop = prfop,
+ .Rn = extended_register_explicit.base.alias.encode(.{ .sp = true }),
+ // S flag: amount 0 = unscaled index, 3 = index scaled by 8; anything else is invalid here.
+ .S = switch (extended_register_explicit.amount) {
+ 0 => false,
+ 3 => true,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.index.alias.encode(.{}),
+ },
+ } } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .base = extended_register.base,
+ .index = extended_register.index,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtw, .lsl, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ }
+ }
+ /// C6.2.253 RBIT
+ /// Reverses the bit order of Rn into Rd; both registers must share one width.
+ pub fn rbit(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rbit = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.254 RET
+ /// Subroutine return: branches to the address in Rn (requires a 64-bit register).
+ pub fn ret(n: Register) Instruction {
+ assert(n.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .unconditional_branch_register = .{
+ .ret = .{ .Rn = n.alias.encode(.{}) },
+ } } };
+ }
+ /// C6.2.256 REV
+ /// Reverses the byte order of the whole register.
+ pub fn rev(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rev = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ // The low opc bit tracks sf so the reversal covers the full register width.
+ .opc0 = sf,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.257 REV16
+ /// Reverses the byte order within each 16-bit halfword.
+ pub fn rev16(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rev16 = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.258 REV32
+ /// Reverses the byte order within each 32-bit word; only defined for 64-bit registers.
+ pub fn rev32(d: Register, n: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_one_source = .{
+ .rev32 = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.263 RORV
+ /// Rotate right by a variable (register) amount: Rd = Rn ROR Rm.
+ pub fn rorv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .rorv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.264 SB
+ /// Speculation barrier.
+ pub fn sb() Instruction {
+ return .{ .branch_exception_generating_system = .{ .barriers = .{
+ .sb = .{},
+ } } };
+ }
+ /// C6.2.265 SBC
+ /// Subtract with carry (borrow); all three registers must share one width.
+ pub fn sbc(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .sbc = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.266 SBCS
+ /// Subtract with carry, setting condition flags.
+ pub fn sbcs(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_with_carry = .{
+ .sbcs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.268 SBFM
+ /// Signed bitfield move; `bitmask` must be a valid bitfield spec for this width.
+ pub fn sbfm(d: Register, n: Register, bitmask: DataProcessingImmediate.Bitmask) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and bitmask.validBitfield(sf));
+ return .{ .data_processing_immediate = .{ .bitfield = .{
+ .sbfm = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.236 SCVTF (scalar, integer)
+ /// Signed integer (Rn) to floating-point (Rd) conversion.
+ pub fn scvtf(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .scvtf = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ // Destination scalar format selects ftype; source integer width selects sf.
+ .ftype = switch (d.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = n.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.270 SDIV
+ /// Signed divide: Rd = Rn / Rm; all three registers must share one width.
+ pub fn sdiv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .sdiv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.280 SEV
+ /// Send-event hint.
+ pub fn sev() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .sev = .{},
+ } } };
+ }
+ /// C6.2.281 SEVL
+ /// Send-event-local hint.
+ pub fn sevl() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .sevl = .{},
+ } } };
+ }
+ /// C6.2.282 SMADDL
+ /// Signed multiply-add long: Xd = Xa + (Wn * Wm), widening the 32-bit product.
+ pub fn smaddl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .smaddl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.283 SMC
+ /// Secure monitor call with a 16-bit immediate.
+ pub fn smc(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .smc = .{ .imm16 = imm },
+ } } };
+ }
+ /// C7.2.279 SMOV
+ /// Signed move from a vector element to a general-purpose register.
+ pub fn smov(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ const vs = n.format.element.size;
+ // B/H elements may extend to either width; S elements only to a 64-bit destination.
+ switch (vs) {
+ else => unreachable,
+ .byte, .half => {},
+ .single => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .simd_copy = .{
+ .smov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ // imm5 packs the element index above a one-hot element-size marker bit.
+ .imm5 = switch (vs) {
+ else => unreachable,
+ .byte => @as(u5, @as(u4, @intCast(n.format.element.index))) << 1 | @as(u5, 0b1) << 0,
+ .half => @as(u5, @as(u3, @intCast(n.format.element.index))) << 2 | @as(u5, 0b10) << 0,
+ .single => @as(u5, @as(u2, @intCast(n.format.element.index))) << 3 | @as(u5, 0b100) << 0,
+ },
+ .Q = sf,
+ },
+ } } };
+ }
+ /// C6.2.287 SMSUBL
+ /// Signed multiply-subtract long: Xd = Xa - (Wn * Wm), widening the 32-bit product.
+ pub fn smsubl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .smsubl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.288 SMULH
+ /// Signed multiply high: Xd = high 64 bits of the 128-bit product Xn * Xm.
+ pub fn smulh(d: Register, n: Register, m: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .doubleword and m.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .smulh = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.321 STP
+ /// C7.2.330 STP (SIMD&FP)
+ /// Store pair of registers; the register format (integer vs scalar) selects the encoding.
+ pub fn stp(t1: Register, t2: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i10 },
+ pre_index: struct { base: Register, index: i10 },
+ signed_offset: struct { base: Register, offset: i10 = 0 },
+ base: Register,
+ }) Instruction {
+ switch (t1.format) {
+ else => unreachable,
+ .integer => |sf| {
+ assert(t2.format.integer == sf);
+ // imm7 is the byte offset divided by the register size (4 or 8); @shrExact traps on misalignment.
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_post_indexed = .{ .integer = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(post_index.index, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .signed_offset => |signed_offset| {
+ assert(signed_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_offset = .{ .integer = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(signed_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_pre_indexed = .{ .integer = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{}),
+ .imm7 = @intCast(@shrExact(pre_index.index, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ // A bare base means signed offset zero.
+ .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+ }
+ },
+ .scalar => |vs| {
+ assert(t2.format.scalar == vs);
+ // imm7 is scaled by the scalar size (assumes @intFromEnum(vs) == log2(byte size) — NOTE(review): confirm).
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_post_indexed = .{ .vector = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(post_index.index, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .signed_offset => |signed_offset| {
+ assert(signed_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_offset = .{ .vector = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = signed_offset.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(signed_offset.offset, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_pair_pre_indexed = .{ .vector = .{
+ .stp = .{
+ .Rt = t1.alias.encode(.{ .V = true }),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .Rt2 = t2.alias.encode(.{ .V = true }),
+ .imm7 = @intCast(@shrExact(pre_index.index, @intFromEnum(vs))),
+ .opc = .encode(vs),
+ },
+ } } } };
+ },
+ // A bare base means signed offset zero.
+ .base => |base| continue :form .{ .signed_offset = .{ .base = base } },
+ }
+ },
+ }
+ }
+ /// C6.2.322 STR (immediate)
+ /// C7.2.331 STR (immediate, SIMD&FP)
+ /// Store register; post/pre-index offsets are unscaled i9, unsigned offsets are scaled by the access size.
+ pub fn str(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u16 = 0 },
+ base: Register,
+ }) Instruction {
+ switch (t.format) {
+ else => unreachable,
+ .integer => |sf| form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .str = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .sf = sf,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .str = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .sf = sf,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .str = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // Offset scaled by the register size (4 or 8); @shrExact traps on misalignment.
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, @as(u2, 2) + @intFromEnum(sf))),
+ .sf = sf,
+ },
+ } } } };
+ },
+ // A bare base means unsigned offset zero.
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ },
+ .scalar => |vs| form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .vector = .{
+ .str = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .vector = .{
+ .str = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .vector = .{
+ .str = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // Offset scaled by the scalar size (assumes @intFromEnum(vs) == log2(byte size) — NOTE(review): confirm).
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, @intFromEnum(vs))),
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } };
+ },
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ },
+ }
+ }
+ /// C6.2.324 STRB (immediate)
+ /// Store byte from the low 8 bits of a W register; the unsigned offset is byte-granular (unscaled).
+ pub fn strb(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u12 = 0 },
+ base: Register,
+ }) Instruction {
+ assert(t.format.integer == .word);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .strb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .strb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .strb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ .imm12 = unsigned_offset.offset,
+ },
+ } } } };
+ },
+ // A bare base means unsigned offset zero.
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ }
+ }
+ /// C6.2.326 STRH (immediate)
+ /// Store halfword from the low 16 bits of a W register; the unsigned offset is scaled by 2.
+ pub fn strh(t: Register, form: union(enum) {
+ post_index: struct { base: Register, index: i9 },
+ pre_index: struct { base: Register, index: i9 },
+ unsigned_offset: struct { base: Register, offset: u13 = 0 },
+ base: Register,
+ }) Instruction {
+ assert(t.format.integer == .word);
+ form: switch (form) {
+ .post_index => |post_index| {
+ assert(post_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_post_indexed = .{ .integer = .{
+ .strh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = post_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = post_index.index,
+ },
+ } } } };
+ },
+ .pre_index => |pre_index| {
+ assert(pre_index.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_immediate_pre_indexed = .{ .integer = .{
+ .strh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = pre_index.base.alias.encode(.{ .sp = true }),
+ .imm9 = pre_index.index,
+ },
+ } } } };
+ },
+ .unsigned_offset => |unsigned_offset| {
+ assert(unsigned_offset.base.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unsigned_immediate = .{ .integer = .{
+ .strh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = unsigned_offset.base.alias.encode(.{ .sp = true }),
+ // Offset must be halfword-aligned; @shrExact traps otherwise.
+ .imm12 = @intCast(@shrExact(unsigned_offset.offset, 1)),
+ },
+ } } } };
+ },
+ // A bare base means unsigned offset zero.
+ .base => |base| continue :form .{ .unsigned_offset = .{ .base = base } },
+ }
+ }
+ /// C6.2.346 STUR
+ /// C7.2.333 STUR (SIMD&FP)
+ /// Store register with an unscaled signed 9-bit byte offset.
+ pub fn stur(t: Register, n: Register, simm: i9) Instruction {
+ assert(n.format.integer == .doubleword);
+ switch (t.format) {
+ else => unreachable,
+ .integer => |sf| return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .stur = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .sf = sf,
+ },
+ } } } },
+ .scalar => |vs| return .{ .load_store = .{ .register_unscaled_immediate = .{ .vector = .{
+ .stur = .{
+ .Rt = t.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ .opc1 = .encode(vs),
+ .size = .encode(vs),
+ },
+ } } } },
+ }
+ }
+ /// C6.2.347 STURB
+ /// Store byte with an unscaled signed 9-bit offset.
+ pub fn sturb(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .word and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .sturb = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.348 STURH
+ /// Store halfword with an unscaled signed 9-bit offset.
+ pub fn sturh(t: Register, n: Register, simm: i9) Instruction {
+ assert(t.format.integer == .word and n.format.integer == .doubleword);
+ return .{ .load_store = .{ .register_unscaled_immediate = .{ .integer = .{
+ .sturh = .{
+ .Rt = t.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm9 = simm,
+ },
+ } } } };
+ }
+ /// C6.2.356 SUB (extended register)
+ /// C6.2.357 SUB (immediate)
+ /// C6.2.358 SUB (shifted register)
+ /// Subtract without setting flags; SP is allowed as source and destination.
+ pub fn sub(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .sub = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // Extended-register left shift is limited to 0..4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .sub = .{
+ .Rd = d.alias.encode(.{ .sp = true }),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ // SP cannot be encoded in the shifted-register form (register 31 means XZR there),
+ // so route through the equivalent extended-register form (UXTW/UXTX #0) when SP is involved.
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .sub = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ // ROR is not a valid add/subtract shift.
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C6.2.362 SUBS (extended register)
+ /// C6.2.363 SUBS (immediate)
+ /// C6.2.364 SUBS (shifted register)
+ /// Subtract, setting condition flags; note Rd is encoded without the SP option (flags variant).
+ pub fn subs(d: Register, n: Register, form: union(enum) {
+ extended_register_explicit: struct {
+ register: Register,
+ option: DataProcessingRegister.AddSubtractExtendedRegister.Option,
+ amount: DataProcessingRegister.AddSubtractExtendedRegister.Extend.Amount,
+ },
+ extended_register: struct { register: Register, extend: DataProcessingRegister.AddSubtractExtendedRegister.Extend },
+ immediate: u12,
+ shifted_immediate: struct { immediate: u12, lsl: DataProcessingImmediate.AddSubtractImmediate.Shift = .@"0" },
+ register: Register,
+ shifted_register_explicit: struct { register: Register, shift: DataProcessingRegister.Shift.Op, amount: u6 },
+ shifted_register: struct { register: Register, shift: DataProcessingRegister.Shift = .none },
+ }) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf);
+ form: switch (form) {
+ .extended_register_explicit => |extended_register_explicit| {
+ assert(extended_register_explicit.register.format.integer == extended_register_explicit.option.sf());
+ return .{ .data_processing_register = .{ .add_subtract_extended_register = .{
+ .subs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ // Extended-register left shift is limited to 0..4.
+ .imm3 = switch (extended_register_explicit.amount) {
+ 0...4 => |amount| amount,
+ else => unreachable,
+ },
+ .option = extended_register_explicit.option,
+ .Rm = extended_register_explicit.register.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ },
+ .extended_register => |extended_register| continue :form .{ .extended_register_explicit = .{
+ .register = extended_register.register,
+ .option = extended_register.extend,
+ .amount = switch (extended_register.extend) {
+ .uxtb, .uxth, .uxtw, .uxtx, .sxtb, .sxth, .sxtw, .sxtx => |amount| amount,
+ },
+ } },
+ .immediate => |immediate| continue :form .{ .shifted_immediate = .{ .immediate = immediate } },
+ .shifted_immediate => |shifted_immediate| {
+ return .{ .data_processing_immediate = .{ .add_subtract_immediate = .{
+ .subs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .sp = true }),
+ .imm12 = shifted_immediate.immediate,
+ .sh = shifted_immediate.lsl,
+ .sf = sf,
+ },
+ } } };
+ },
+ // SP cannot be encoded in the shifted-register form, so route through the
+ // equivalent extended-register form (UXTW/UXTX #0) when SP is involved.
+ .register => |register| continue :form if (d.alias == .sp or n.alias == .sp or register.alias == .sp)
+ .{ .extended_register = .{ .register = register, .extend = switch (sf) {
+ .word => .{ .uxtw = 0 },
+ .doubleword => .{ .uxtx = 0 },
+ } } }
+ else
+ .{ .shifted_register = .{ .register = register } },
+ .shifted_register_explicit => |shifted_register_explicit| {
+ assert(shifted_register_explicit.register.format.integer == sf);
+ return .{ .data_processing_register = .{ .add_subtract_shifted_register = .{
+ .subs = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm6 = switch (sf) {
+ .word => @as(u5, @intCast(shifted_register_explicit.amount)),
+ .doubleword => @as(u6, @intCast(shifted_register_explicit.amount)),
+ },
+ .Rm = shifted_register_explicit.register.alias.encode(.{}),
+ // ROR is not a valid add/subtract shift.
+ .shift = switch (shifted_register_explicit.shift) {
+ .lsl, .lsr, .asr => |shift| shift,
+ .ror => unreachable,
+ },
+ .sf = sf,
+ },
+ } } };
+ },
+ .shifted_register => |shifted_register| continue :form .{ .shifted_register_explicit = .{
+ .register = shifted_register.register,
+ .shift = shifted_register.shift,
+ .amount = switch (shifted_register.shift) {
+ .lsl, .lsr, .asr => |amount| amount,
+ .ror => unreachable,
+ },
+ } },
+ }
+ }
+ /// C6.2.365 SVC
+ /// Supervisor call with a 16-bit immediate.
+ pub fn svc(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .svc = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.372 SYS
+ /// System instruction; Rt supplies the 64-bit source operand.
+ pub fn sys(op1: u3, n: u4, m: u4, op2: u3, t: Register) Instruction {
+ assert(t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system = .{
+ .sys = .{
+ .Rt = t.alias.encode(.{}),
+ .op2 = op2,
+ .CRm = m,
+ .CRn = n,
+ .op1 = op1,
+ },
+ } } };
+ }
+ /// C6.2.373 SYSL
+ /// System instruction with result; Rt receives the 64-bit result.
+ pub fn sysl(t: Register, op1: u3, n: u4, m: u4, op2: u3) Instruction {
+ assert(t.format.integer == .doubleword);
+ return .{ .branch_exception_generating_system = .{ .system = .{
+ .sysl = .{
+ .Rt = t.alias.encode(.{}),
+ .op2 = op2,
+ .CRm = m,
+ .CRn = n,
+ .op1 = op1,
+ },
+ } } };
+ }
+ /// C6.2.374 TBNZ
+ /// Test bit `imm` of Rt and branch if nonzero; `label` is a byte offset that must be 4-byte aligned.
+ pub fn tbnz(t: Register, imm: u6, label: i16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .test_branch_immediate = .{
+ .tbnz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm14 = @intCast(@shrExact(label, 2)),
+ // b40 holds bit-index[4:0]; a W register limits the index to 0..31 (the @intCast traps above 31).
+ .b40 = @truncate(switch (t.format.integer) {
+ .word => @as(u5, @intCast(imm)),
+ .doubleword => imm,
+ }),
+ // b5 holds bit-index[5].
+ .b5 = @intCast(imm >> 5),
+ },
+ } } };
+ }
+ /// C6.2.375 TBZ
+ /// Test bit `imm` of Rt and branch if zero; `label` is a byte offset that must be 4-byte aligned.
+ pub fn tbz(t: Register, imm: u6, label: i16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .test_branch_immediate = .{
+ .tbz = .{
+ .Rt = t.alias.encode(.{}),
+ .imm14 = @intCast(@shrExact(label, 2)),
+ // b40 holds bit-index[4:0]; a W register limits the index to 0..31 (the @intCast traps above 31).
+ .b40 = @truncate(switch (t.format.integer) {
+ .word => @as(u5, @intCast(imm)),
+ .doubleword => imm,
+ }),
+ // b5 holds bit-index[5].
+ .b5 = @intCast(imm >> 5),
+ },
+ } } };
+ }
+ /// C6.2.376 TCANCEL
+ /// Cancels the current transaction (TME) with a 16-bit reason code.
+ pub fn tcancel(imm: u16) Instruction {
+ return .{ .branch_exception_generating_system = .{ .exception_generating = .{
+ .tcancel = .{ .imm16 = imm },
+ } } };
+ }
+ /// C6.2.385 UBFM
+ /// Unsigned bitfield move; `bitmask` must be a valid bitfield spec for this width.
+ pub fn ubfm(d: Register, n: Register, bitmask: DataProcessingImmediate.Bitmask) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and bitmask.validBitfield(sf));
+ return .{ .data_processing_immediate = .{ .bitfield = .{
+ .ubfm = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .imm = bitmask,
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C7.2.355 UCVTF (scalar, integer)
+ /// Unsigned integer (Rn) to floating-point (Rd) conversion.
+ pub fn ucvtf(d: Register, n: Register) Instruction {
+ return .{ .data_processing_vector = .{ .convert_float_integer = .{
+ .ucvtf = .{
+ .Rd = d.alias.encode(.{ .V = true }),
+ .Rn = n.alias.encode(.{}),
+ // Destination scalar format selects ftype; source integer width selects sf.
+ .ftype = switch (d.format.scalar) {
+ else => unreachable,
+ .single => .single,
+ .double => .double,
+ .half => .half,
+ },
+ .sf = n.format.integer,
+ },
+ } } };
+ }
+ /// C6.2.387 UDF
+ /// Permanently undefined: a reserved encoding carrying a 16-bit immediate.
+ pub fn udf(imm: u16) Instruction {
+ return .{ .reserved = .{
+ .udf = .{ .imm16 = imm },
+ } };
+ }
+ /// C6.2.388 UDIV
+ /// Unsigned divide; all three registers must share the same integer width
+ /// (asserted).
+ pub fn udiv(d: Register, n: Register, m: Register) Instruction {
+ const sf = d.format.integer;
+ assert(n.format.integer == sf and m.format.integer == sf);
+ return .{ .data_processing_register = .{ .data_processing_two_source = .{
+ .udiv = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ .sf = sf,
+ },
+ } } };
+ }
+ /// C6.2.389 UMADDL
+ /// Unsigned multiply-add long: `n` and `m` are W registers, `d` and `a` are
+ /// X registers (asserted); per the manual, d = a + (n * m) widened unsigned.
+ pub fn umaddl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .umaddl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.391 UMSUBL
+ /// Unsigned multiply-subtract long: `n` and `m` are W registers, `d` and
+ /// `a` are X registers (asserted); per the manual, d = a - (n * m) widened
+ /// unsigned.
+ pub fn umsubl(d: Register, n: Register, m: Register, a: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .word and m.format.integer == .word and a.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .umsubl = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Ra = a.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C7.2.371 UMOV
+ /// Move vector element `n` to general-purpose register `d`. Element sizes
+ /// byte/half/single require a W destination; double requires X (asserted).
+ /// `imm5` packs the element index above a one-hot size tag in the low bits
+ /// (0b1 = byte, 0b10 = half, 0b100 = single, 0b1000 = double); the index
+ /// casts trap when the index is out of range for the element size.
+ pub fn umov(d: Register, n: Register) Instruction {
+ const sf = d.format.integer;
+ const vs = n.format.element.size;
+ switch (vs) {
+ else => unreachable,
+ .byte, .half, .single => assert(sf == .word),
+ .double => assert(sf == .doubleword),
+ }
+ return .{ .data_processing_vector = .{ .simd_copy = .{
+ .umov = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{ .V = true }),
+ .imm5 = switch (vs) {
+ else => unreachable,
+ .byte => @as(u5, @as(u4, @intCast(n.format.element.index))) << 1 | @as(u5, 0b1) << 0,
+ .half => @as(u5, @as(u3, @intCast(n.format.element.index))) << 2 | @as(u5, 0b10) << 0,
+ .single => @as(u5, @as(u2, @intCast(n.format.element.index))) << 3 | @as(u5, 0b100) << 0,
+ .double => @as(u5, @as(u1, @intCast(n.format.element.index))) << 4 | @as(u5, 0b1000) << 0,
+ },
+ .Q = sf,
+ },
+ } } };
+ }
+ /// C6.2.392 UMULH
+ /// Unsigned multiply high: all operands are X registers (asserted); per the
+ /// manual, `d` receives the upper 64 bits of the 128-bit product n * m.
+ pub fn umulh(d: Register, n: Register, m: Register) Instruction {
+ assert(d.format.integer == .doubleword and n.format.integer == .doubleword and m.format.integer == .doubleword);
+ return .{ .data_processing_register = .{ .data_processing_three_source = .{
+ .umulh = .{
+ .Rd = d.alias.encode(.{}),
+ .Rn = n.alias.encode(.{}),
+ .Rm = m.alias.encode(.{}),
+ },
+ } } };
+ }
+ /// C6.2.396 WFE
+ /// Wait-for-event hint instruction; takes no operands.
+ pub fn wfe() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .wfe = .{},
+ } } };
+ }
+ /// C6.2.398 WFI
+ /// Wait-for-interrupt hint instruction; takes no operands.
+ pub fn wfi() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .wfi = .{},
+ } } };
+ }
+ /// C6.2.402 YIELD
+ /// Yield hint instruction; takes no operands.
+ pub fn yield() Instruction {
+ return .{ .branch_exception_generating_system = .{ .hints = .{
+ .yield = .{},
+ } } };
+ }
+
+ /// Width of every encoded instruction in bytes (4).
+ pub const size = @divExact(@bitSizeOf(Backing), 8);
+ /// Raw 32-bit integer behind the packed instruction representation.
+ pub const Backing = u32;
+ /// Decode an instruction from 4 little-endian bytes.
+ pub fn read(mem: *const [size]u8) Instruction {
+ return @bitCast(std.mem.readInt(Backing, mem, .little));
+ }
+ /// Encode an instruction into 4 little-endian bytes.
+ pub fn write(inst: Instruction, mem: *[size]u8) void {
+ std.mem.writeInt(Backing, mem, @bitCast(inst), .little);
+ }
+
+ /// `std.Io.Writer` formatting hook: renders the instruction by delegating
+ /// to the aarch64 disassembler's `printInstruction`.
+ pub fn format(inst: Instruction, writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ const dis: aarch64.Disassemble = .{};
+ try dis.printInstruction(inst, writer);
+ }
+
+ comptime {
+ // Validate the entire encoding tree at compile time; the quota covers
+ // the recursive walk over every nested union/struct.
+ @setEvalBranchQuota(68_000);
+ verify(@typeName(Instruction), Instruction);
+ }
+ /// Compile-time check that every (nested) union/struct in the instruction
+ /// encoding is packed and exactly 32 bits wide, and that fields named
+ /// `encodedN`/`decodedN` sit at bit offset N. `encoded*` fields must have
+ /// no default value; `decoded*` fields must have one. Violations fail the
+ /// build via `@compileError` (or `@compileLog` for the width/layout checks).
+ fn verify(name: []const u8, Type: type) void {
+ switch (@typeInfo(Type)) {
+ .@"union" => |info| {
+ if (info.layout != .@"packed" or @bitSizeOf(Type) != @bitSizeOf(Backing)) {
+ @compileLog(name ++ " should have u32 abi");
+ }
+ // Every union payload must itself satisfy the same invariants.
+ for (info.fields) |field| verify(name ++ "." ++ field.name, field.type);
+ },
+ .@"struct" => |info| {
+ if (info.layout != .@"packed" or info.backing_integer != Backing) {
+ @compileLog(name ++ " should have u32 abi");
+ }
+ // Walk fields in declaration order, tracking the running bit offset.
+ var bit_offset = 0;
+ for (info.fields) |field| {
+ if (std.mem.startsWith(u8, field.name, "encoded")) {
+ // `encodedN`: the numeric suffix must equal the field's bit
+ // offset (a missing/unparsable suffix also fails) ...
+ if (if (std.fmt.parseInt(u5, field.name["encoded".len..], 10)) |encoded_bit_offset| encoded_bit_offset != bit_offset else |_| true) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named encoded{d}", .{ name, field.name, bit_offset }));
+ }
+ // ... and the field must not carry a default value.
+ if (field.default_value_ptr != null) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named decoded{d}", .{ name, field.name, bit_offset }));
+ }
+ } else if (std.mem.startsWith(u8, field.name, "decoded")) {
+ // `decodedN`: same suffix rule, but a default value is required.
+ if (if (std.fmt.parseInt(u5, field.name["decoded".len..], 10)) |decoded_bit_offset| decoded_bit_offset != bit_offset else |_| true) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named decoded{d}", .{ name, field.name, bit_offset }));
+ }
+ if (field.default_value_ptr == null) {
+ @compileError(std.fmt.comptimePrint("{s}.{s} should be named encoded{d}", .{ name, field.name, bit_offset }));
+ }
+ }
+ bit_offset += @bitSizeOf(field.type);
+ }
+ },
+ else => @compileError(name ++ " has an unexpected field type"),
+ }
+ }
+};
+
+const aarch64 = @import("../aarch64.zig");
+const assert = std.debug.assert;
+const std = @import("std");
src/codegen/aarch64/instructions.zon
@@ -0,0 +1,1343 @@
+.{
+ // C6.2.3 ADD (extended register)
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, <Xm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .extend = .{ .extend = .{ .size = .doubleword } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Xm, .option = .extend, .amount = .amount } } },
+ },
+ // C6.2.4 ADD (immediate)
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "ADD <Wd|WSP>, <Wn|WSP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "ADD <Xd|SP>, <Xn|SP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ // C6.2.5 ADD (shifted register)
+ .{
+ .pattern = "ADD <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ADD <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .add, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ADD <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ADD <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .add, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.13 AND (shifted register)
+ .{
+ .pattern = "AND <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .@"and", .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "AND <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .@"and", .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "AND <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .@"and", .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "AND <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .@"and", .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.15 ANDS (shifted register)
+ .{
+ .pattern = "ANDS <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .ands, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ANDS <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .ands, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ANDS <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .ands, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ANDS <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .ands, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.35 BLR
+ .{
+ .pattern = "BLR <Xn>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .blr, .Xn },
+ },
+ // C6.2.30 BFM
+ .{
+ .pattern = "BFM <Wd>, <Wn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .bfm, .Wd, .Wn, .{ .N = .word, .immr = .immr, .imms = .imms } },
+ },
+ .{
+ .pattern = "BFM <Xd>, <Xn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .bfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .immr, .imms = .imms } },
+ },
+ // C6.2.37 BR
+ .{
+ .pattern = "BR <Xn>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .br, .Xn },
+ },
+ // C6.2.40 BRK
+ .{
+ .pattern = "BRK #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .brk, .imm },
+ },
+ // C6.2.56 CLREX
+ .{
+ .pattern = "CLREX",
+ .symbols = .{},
+ .encode = .{ .clrex, 0b1111 },
+ },
+ .{
+ .pattern = "CLREX #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 } } },
+ },
+ .encode = .{ .clrex, .imm },
+ },
+ // C6.2.109 DC
+ .{
+ .pattern = "DC IVAC, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0110, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC ISW, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0110, 0b010, .Xt },
+ },
+ .{
+ .pattern = "DC CSW, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b1010, 0b010, .Xt },
+ },
+ .{
+ .pattern = "DC CISW, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b1110, 0b010, .Xt },
+ },
+ .{
+ .pattern = "DC ZVA, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b0100, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC CVAC, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b1010, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC CVAU, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b1011, 0b001, .Xt },
+ },
+ .{
+ .pattern = "DC CIVAC, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b1110, 0b001, .Xt },
+ },
+ // C6.2.110 DCPS1
+ .{
+ .pattern = "DCPS1",
+ .symbols = .{},
+ .encode = .{ .dcps1, 0 },
+ },
+ .{
+ .pattern = "DCPS1 #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .dcps1, .imm },
+ },
+ // C6.2.111 DCPS2
+ .{
+ .pattern = "DCPS2",
+ .symbols = .{},
+ .encode = .{ .dcps2, 0 },
+ },
+ .{
+ .pattern = "DCPS2 #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .dcps2, .imm },
+ },
+ // C6.2.112 DCPS3
+ .{
+ .pattern = "DCPS3",
+ .symbols = .{},
+ .encode = .{ .dcps3, 0 },
+ },
+ .{
+ .pattern = "DCPS3 #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .dcps3, .imm },
+ },
+ // C6.2.116 DSB
+ .{
+ .pattern = "DSB <option>",
+ .symbols = .{
+ .option = .{ .barrier = .{} },
+ },
+ .encode = .{ .dsb, .option },
+ },
+ .{
+ .pattern = "DSB #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 } } },
+ },
+ .encode = .{ .dsb, .imm },
+ },
+ // C6.2.120 EOR (shifted register)
+ .{
+ .pattern = "EOR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .eor, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "EOR <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .eor, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "EOR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .eor, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "EOR <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .eor, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.124 EXTR
+ .{
+ .pattern = "EXTR <Wd>, <Wn>, <Wm>, #<lsb>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .lsb = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .extr, .Wd, .Wn, .Wm, .lsb },
+ },
+ .{
+ .pattern = "EXTR <Xd>, <Xn>, <Xm>, #<lsb>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .lsb = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .extr, .Xd, .Xn, .Xm, .lsb },
+ },
+ // C6.2.126 HINT
+ .{
+ .pattern = "HINT #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 7 } } },
+ },
+ .encode = .{ .hint, .imm },
+ },
+ // C6.2.127 HLT
+ .{
+ .pattern = "HLT #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .hlt, .imm },
+ },
+ // C6.2.128 HVC
+ .{
+ .pattern = "HVC #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .hvc, .imm },
+ },
+ // C6.2.129 IC
+ // The no-operand aliases default Xt to XZR, so they declare no symbols —
+ // matching the convention used by the other no-operand entries in this
+ // file (CLREX, DCPS1-3, ISB). Previously they carried an unused `.Xt`.
+ .{
+ .pattern = "IC IALLUIS",
+ .symbols = .{},
+ .encode = .{ .sys, 0b000, 0b0111, 0b0001, 0b000, .xzr },
+ },
+ .{
+ .pattern = "IC IALLUIS, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0001, 0b000, .Xt },
+ },
+ .{
+ .pattern = "IC IALLU",
+ .symbols = .{},
+ .encode = .{ .sys, 0b000, 0b0111, 0b0101, 0b000, .xzr },
+ },
+ .{
+ .pattern = "IC IALLU, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b000, 0b0111, 0b0101, 0b000, .Xt },
+ },
+ .{
+ .pattern = "IC IVAU",
+ .symbols = .{},
+ .encode = .{ .sys, 0b011, 0b0111, 0b0101, 0b001, .xzr },
+ },
+ .{
+ .pattern = "IC IVAU, <Xt>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sys, 0b011, 0b0111, 0b0101, 0b001, .Xt },
+ },
+ // C6.2.131 ISB
+ .{
+ .pattern = "ISB",
+ .symbols = .{},
+ .encode = .{ .isb, .sy },
+ },
+ .{
+ .pattern = "ISB <option>",
+ .symbols = .{
+ .option = .{ .barrier = .{ .only_sy = true } },
+ },
+ .encode = .{ .isb, .option },
+ },
+ .{
+ .pattern = "ISB #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 } } },
+ },
+ .encode = .{ .isb, .imm },
+ },
+ // C6.2.164 LDP
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldp, .Wt1, .Wt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldp, .Xt1, .Xt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ // C6.2.166 LDR (immediate)
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDR <Wt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 14 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .ldr, .Wt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "LDR <Xt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 15 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .ldr, .Xt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ // C6.2.220 MOV (to/from SP)
+ .{
+ .pattern = "MOV WSP, <Wn|WSP>",
+ .symbols = .{
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .wsp, .Wn, .{ .immediate = 0 } },
+ },
+ .{
+ .pattern = "MOV <Wd|WSP>, WSP",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .Wd, .wsp, .{ .immediate = 0 } },
+ },
+ .{
+ .pattern = "MOV SP, <Xn|SP>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .sp, .Xn, .{ .immediate = 0 } },
+ },
+ .{
+ .pattern = "MOV <Xd|SP>, SP",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .add, .Xd, .sp, .{ .immediate = 0 } },
+ },
+ // C6.2.222 MOV (wide immediate)
+ .{
+ .pattern = "MOV <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Wd, .imm, .{ .lsl = .@"0" } },
+ },
+ .{
+ .pattern = "MOV <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Xd, .imm, .{ .lsl = .@"0" } },
+ },
+ // C6.2.224 MOV (register)
+ .{
+ .pattern = "MOV <Wd>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .orr, .Wd, .wzr, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "MOV <Xd>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .orr, .Xd, .xzr, .{ .register = .Xm } },
+ },
+ // C6.2.225 MOVK
+ .{
+ .pattern = "MOVK <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movk, .Wd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVK <Wd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movk, .Wd, .imm, .{ .lsl = .shift } },
+ },
+ .{
+ .pattern = "MOVK <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movk, .Xd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVK <Xd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movk, .Xd, .imm, .{ .lsl = .shift } },
+ },
+ // C6.2.226 MOVN
+ .{
+ .pattern = "MOVN <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movn, .Wd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVN <Wd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movn, .Wd, .imm, .{ .lsl = .shift } },
+ },
+ .{
+ .pattern = "MOVN <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movn, .Xd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVN <Xd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movn, .Xd, .imm, .{ .lsl = .shift } },
+ },
+ // C6.2.227 MOVZ
+ .{
+ .pattern = "MOVZ <Wd>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Wd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVZ <Wd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movz, .Wd, .imm, .{ .lsl = .shift } },
+ },
+ .{
+ .pattern = "MOVZ <Xd>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .movz, .Xd, .imm, .{} },
+ },
+ .{
+ .pattern = "MOVZ <Xd>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 }, .multiple_of = 16 } },
+ },
+ .encode = .{ .movz, .Xd, .imm, .{ .lsl = .shift } },
+ },
+ // C6.2.228 MRS
+ .{
+ .pattern = "MRS <Xt>, CTR_EL0",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .mrs, .Xt, 0b11, 0b011, 0b0000, 0b0000, 0b001 },
+ },
+ // C6.2.234 NEG
+ .{
+ .pattern = "NEG <Wd>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .sub, .Wd, .wzr, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "NEG <Wd>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sub, .Wd, .wzr, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "NEG <Xd>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sub, .Xd, .xzr, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "NEG <Xd>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sub, .Xd, .xzr, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.238 NOP
+ .{
+ .pattern = "NOP",
+ .symbols = .{},
+ .encode = .{.nop},
+ },
+ // C6.2.241 ORR (shifted register)
+ .{
+ .pattern = "ORR <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .orr, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "ORR <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .orr, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "ORR <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .orr, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "ORR <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{} },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .orr, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.254 RET
+ .{
+ .pattern = "RET",
+ .symbols = .{},
+ .encode = .{ .ret, .x30 },
+ },
+ .{
+ .pattern = "RET <Xn>",
+ .symbols = .{
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .ret, .Xn },
+ },
+ // C6.2.268 SBFM
+ .{
+ .pattern = "SBFM <Wd>, <Wn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sbfm, .Wd, .Wn, .{ .N = .word, .immr = .immr, .imms = .imms } },
+ },
+ .{
+ .pattern = "SBFM <Xd>, <Xn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sbfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .immr, .imms = .imms } },
+ },
+ // C6.2.280 SEV
+ .{
+ .pattern = "SEV",
+ .symbols = .{},
+ .encode = .{.sev},
+ },
+ // C6.2.281 SEVL
+ .{
+ .pattern = "SEVL",
+ .symbols = .{},
+ .encode = .{.sevl},
+ },
+ // C6.2.283 SMC
+ .{
+ .pattern = "SMC #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .smc, .imm },
+ },
+ // C6.2.321 STP
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>], #<imm>",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .post_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]!",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .pre_index = .{ .base = .Xn, .index = .imm } } },
+ },
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STP <Wt1>, <Wt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Wt1 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wt2 = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .stp, .Wt1, .Wt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STP <Xt1>, <Xt2>, [<Xn|SP>, #<imm>]",
+ .symbols = .{
+ .Xt1 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xt2 = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 10 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .stp, .Xt1, .Xt2, .{ .signed_offset = .{ .base = .Xn, .offset = .imm } } },
+ },
+ // C6.2.322 STR (immediate)
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Wt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>], #<simm>",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Xt, .{ .post_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Wt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>, #<simm>]!",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .simm = .{ .imm = .{ .type = .{ .signedness = .signed, .bits = 9 } } },
+ },
+ .encode = .{ .str, .Xt, .{ .pre_index = .{ .base = .Xn, .index = .simm } } },
+ },
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .str, .Wt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STR <Wt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Wt = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 14 }, .multiple_of = 4 } },
+ },
+ .encode = .{ .str, .Wt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ },
+ .encode = .{ .str, .Xt, .{ .base = .Xn } },
+ },
+ .{
+ .pattern = "STR <Xt>, [<Xn|SP>, #<pimm>]",
+ .symbols = .{
+ .Xt = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .pimm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 15 }, .multiple_of = 8 } },
+ },
+ .encode = .{ .str, .Xt, .{ .unsigned_offset = .{ .base = .Xn, .offset = .pimm } } },
+ },
+ // C6.2.356 SUB (extended register)
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, <Wm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .extend = .{ .extend = .{ .size = .word } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Wm, .option = .extend, .amount = .amount } } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, <Xm>, <extend> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .extend = .{ .extend = .{ .size = .doubleword } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 3 }, .max_valid = 4 } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .extended_register_explicit = .{ .register = .Xm, .option = .extend, .amount = .amount } } },
+ },
+ // C6.2.357 SUB (immediate)
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, #<imm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "SUB <Wd|WSP>, <Wn|WSP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, #<imm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .immediate = .imm } },
+ },
+ .{
+ .pattern = "SUB <Xd|SP>, <Xn|SP>, #<imm>, LSL #<shift>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword }, .allow_sp = true } },
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 12 } } },
+ .shift = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 4 }, .multiple_of = 12 } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .shifted_immediate = .{ .immediate = .imm, .lsl = .shift } } },
+ },
+ // C6.2.358 SUB (shifted register)
+ .{
+ .pattern = "SUB <Wd>, <Wn>, <Wm>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .register = .Wm } },
+ },
+ .{
+ .pattern = "SUB <Wd>, <Wn>, <Wm>, <shift> #<amount>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wm = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .sub, .Wd, .Wn, .{ .shifted_register_explicit = .{ .register = .Wm, .shift = .shift, .amount = .amount } } },
+ },
+ .{
+ .pattern = "SUB <Xd>, <Xn>, <Xm>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .register = .Xm } },
+ },
+ .{
+ .pattern = "SUB <Xd>, <Xn>, <Xm>, <shift> #<amount>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xm = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .shift = .{ .shift = .{ .allow_ror = false } },
+ .amount = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .sub, .Xd, .Xn, .{ .shifted_register_explicit = .{ .register = .Xm, .shift = .shift, .amount = .amount } } },
+ },
+ // C6.2.365 SVC
+ .{
+ .pattern = "SVC #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .svc, .imm },
+ },
+ // C6.2.376 TCANCEL
+ .{
+ .pattern = "TCANCEL #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .tcancel, .imm },
+ },
+ // C6.2.385 UBFM
+ .{
+ .pattern = "UBFM <Wd>, <Wn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Wd = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .Wn = .{ .reg = .{ .format = .{ .integer = .word } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 5 } } },
+ },
+ .encode = .{ .ubfm, .Wd, .Wn, .{ .N = .word, .immr = .immr, .imms = .imms } },
+ },
+ .{
+ .pattern = "UBFM <Xd>, <Xn>, #<immr>, #<imms>",
+ .symbols = .{
+ .Xd = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .Xn = .{ .reg = .{ .format = .{ .integer = .doubleword } } },
+ .immr = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ .imms = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 6 } } },
+ },
+ .encode = .{ .ubfm, .Xd, .Xn, .{ .N = .doubleword, .immr = .immr, .imms = .imms } },
+ },
+ // C6.2.387 UDF
+ .{
+ .pattern = "UDF #<imm>",
+ .symbols = .{
+ .imm = .{ .imm = .{ .type = .{ .signedness = .unsigned, .bits = 16 } } },
+ },
+ .encode = .{ .udf, .imm },
+ },
+ // C6.2.396 WFE
+ .{
+ .pattern = "WFE",
+ .symbols = .{},
+ .encode = .{.wfe},
+ },
+ // C6.2.398 WFI
+ .{
+ .pattern = "WFI",
+ .symbols = .{},
+ .encode = .{.wfi},
+ },
+ // C6.2.402 YIELD
+ .{
+ .pattern = "YIELD",
+ .symbols = .{},
+ .encode = .{.yield},
+ },
+}
src/codegen/aarch64/Mir.zig
@@ -0,0 +1,275 @@
+prologue: []const Instruction, // function entry code; emitted first, in forward storage order
+body: []const Instruction, // main code; emitted in reverse storage order (see emitInstructionsBackward)
+epilogue: []const Instruction, // function exit code; also emitted in reverse storage order
+literals: []const u32, // literal-pool words appended after the code, preceded by alignment padding
+nav_relocs: []const Reloc.Nav, // relocations targeting named values (navs)
+uav_relocs: []const Reloc.Uav, // relocations targeting unnamed (anonymous) values
+global_relocs: []const Reloc.Global, // relocations targeting external/global symbols
+literal_relocs: []const Reloc.Literal, // body instructions that load from the literal pool
+
+pub const Reloc = struct {
+ label: u32, // index into `body` of the instruction to be relocated (labels count backward from the end of the emitted body — see emit)
+ addend: u64 align(@alignOf(u32)) = 0, // constant added to the symbol address at link time
+
+ pub const Nav = struct { // relocation against a named value (nav)
+ nav: InternPool.Nav.Index,
+ reloc: Reloc,
+ };
+
+ pub const Uav = struct { // relocation against an unnamed (anonymous) value
+ uav: InternPool.Key.Ptr.BaseAddr.Uav,
+ reloc: Reloc,
+ };
+
+ pub const Global = struct { // relocation against an external symbol
+ global: [*:0]const u8, // null-terminated symbol name
+ reloc: Reloc,
+ };
+
+ pub const Literal = struct { // body instruction referencing the literal pool; offset is patched in emit, no symbol involved
+ label: u32, // index into `body` of the load-literal instruction
+ };
+};
+
+pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
+ assert(mir.body.ptr + mir.body.len == mir.prologue.ptr); // the three instruction slices must form one contiguous allocation laid out as: body, prologue, epilogue
+ assert(mir.prologue.ptr + mir.prologue.len == mir.epilogue.ptr);
+ gpa.free(mir.body.ptr[0 .. mir.body.len + mir.prologue.len + mir.epilogue.len]); // free the combined buffer with a single call
+ gpa.free(mir.literals);
+ gpa.free(mir.nav_relocs);
+ gpa.free(mir.uav_relocs);
+ gpa.free(mir.global_relocs);
+ gpa.free(mir.literal_relocs);
+ mir.* = undefined; // poison to catch use-after-deinit in safe modes
+}
+
+pub fn emit(
+ mir: Mir,
+ lf: *link.File,
+ pt: Zcu.PerThread,
+ src_loc: Zcu.LazySrcLoc,
+ func_index: InternPool.Index,
+ code: *std.ArrayListUnmanaged(u8),
+ debug_output: link.File.DebugInfoOutput,
+) !void { // lowers this function's MIR to machine code bytes in `code` and registers relocations with the linker
+ _ = debug_output; // debug info emission not implemented yet
+ const zcu = pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
+ const func = zcu.funcInfo(func_index);
+ const nav = ip.getNav(func.owner_nav);
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
+ const target = &mod.resolved_target.result;
+ mir_log.debug("{f}:", .{nav.fqn.fmt(ip)});
+
+ const func_align = switch (nav.status.fully_resolved.alignment) { // explicit alignment wins, clamped up to the target minimum
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target), // smallest legal alignment to save size
+ },
+ else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
+ };
+ const code_len = mir.prologue.len + mir.body.len + mir.epilogue.len; // total instruction count
+ const literals_align_gap = -%code_len & (@divExact( // padding instructions so the literal pool starts at func_align (capped at 16 bytes)
+ @as(u5, @intCast(func_align.minStrict(.@"16").toByteUnits().?)),
+ Instruction.size,
+ ) - 1);
+ try code.ensureUnusedCapacity(gpa, Instruction.size * // reserve everything up front so the AssumeCapacity appends below cannot fail
+ (code_len + literals_align_gap + mir.literals.len));
+ emitInstructionsForward(code, mir.prologue);
+ emitInstructionsBackward(code, mir.body); // body is stored reversed; emitting backward restores program order
+ const body_end: u32 = @intCast(code.items.len); // byte offset just past the body; reloc labels are measured back from here
+ emitInstructionsBackward(code, mir.epilogue);
+ code.appendNTimesAssumeCapacity(0, Instruction.size * literals_align_gap); // zero padding before the literal pool
+ code.appendSliceAssumeCapacity(@ptrCast(mir.literals)); // reinterpret the u32 pool as bytes
+ mir_log.debug("", .{});
+
+ for (mir.nav_relocs) |nav_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ switch (try @import("../../codegen.zig").genNavRef( // resolve the nav to a linker symbol index
+ lf,
+ pt,
+ src_loc,
+ nav_reloc.nav,
+ &mod.resolved_target.result,
+ )) {
+ .sym_index => |sym_index| sym_index,
+ .fail => |em| return zcu.codegenFailMsg(func.owner_nav, em),
+ },
+ mir.body[nav_reloc.reloc.label],
+ body_end - Instruction.size * (1 + nav_reloc.reloc.label), // labels index instructions backward from the end of the body
+ nav_reloc.reloc.addend,
+ );
+ for (mir.uav_relocs) |uav_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ switch (try lf.lowerUav( // materialize the anonymous value in the output and get its symbol
+ pt,
+ uav_reloc.uav.val,
+ ZigType.fromInterned(uav_reloc.uav.orig_ty).ptrAlignment(zcu),
+ src_loc,
+ )) {
+ .sym_index => |sym_index| sym_index,
+ .fail => |em| return zcu.codegenFailMsg(func.owner_nav, em),
+ },
+ mir.body[uav_reloc.reloc.label],
+ body_end - Instruction.size * (1 + uav_reloc.reloc.label),
+ uav_reloc.reloc.addend,
+ );
+ for (mir.global_relocs) |global_reloc| try emitReloc(
+ lf,
+ zcu,
+ func.owner_nav,
+ if (lf.cast(.elf)) |ef|
+ try ef.getGlobalSymbol(std.mem.span(global_reloc.global), null)
+ else if (lf.cast(.macho)) |mf|
+ try mf.getGlobalSymbol(std.mem.span(global_reloc.global), null)
+ else if (lf.cast(.coff)) |cf|
+ try cf.getGlobalSymbol(std.mem.span(global_reloc.global), "compiler_rt")
+ else
+ return zcu.codegenFail(func.owner_nav, "external symbols unimplemented for {s}", .{@tagName(lf.tag)}),
+ mir.body[global_reloc.reloc.label],
+ body_end - Instruction.size * (1 + global_reloc.reloc.label),
+ global_reloc.reloc.addend,
+ );
+ const literal_reloc_offset: i19 = @intCast(mir.epilogue.len + literals_align_gap); // instructions between body end and pool start; presumably imm19 was encoded relative to body end — TODO confirm
+ for (mir.literal_relocs) |literal_reloc| {
+ var instruction = mir.body[literal_reloc.label];
+ instruction.load_store.register_literal.group.imm19 += literal_reloc_offset; // imm19 is a word (instruction) count in load-literal encodings
+ instruction.write( // patch the already-emitted instruction in place
+ code.items[body_end - Instruction.size * (1 + literal_reloc.label) ..][0..Instruction.size],
+ );
+ }
+}
+
+fn emitInstructionsForward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void { // append instructions in storage order; capacity must already be reserved
+ for (instructions) |instruction| emitInstruction(code, instruction);
+}
+fn emitInstructionsBackward(code: *std.ArrayListUnmanaged(u8), instructions: []const Instruction) void { // append instructions in reverse storage order (body/epilogue are stored reversed)
+ var instruction_index = instructions.len;
+ while (instruction_index > 0) {
+ instruction_index -= 1;
+ emitInstruction(code, instructions[instruction_index]);
+ }
+}
+fn emitInstruction(code: *std.ArrayListUnmanaged(u8), instruction: Instruction) void { // encode one instruction into the output buffer; logs the disassembly
+ mir_log.debug(" {f}", .{instruction});
+ instruction.write(code.addManyAsArrayAssumeCapacity(Instruction.size));
+}
+
+fn emitReloc( // registers one linker relocation, choosing the relocation type from the instruction's decoded encoding
+ lf: *link.File,
+ zcu: *Zcu,
+ owner_nav: InternPool.Nav.Index,
+ sym_index: u32, // linker symbol the relocation targets
+ instruction: Instruction, // the instruction at `offset`, used only to pick the reloc type
+ offset: u32, // byte offset of the instruction within the function's code
+ addend: u64,
+) !void {
+ const gpa = zcu.gpa;
+ switch (instruction.decode()) {
+ else => unreachable, // only branch and data-processing-immediate instructions carry relocations here
+ .branch_exception_generating_system => |decoded| if (lf.cast(.elf)) |ef| { // NOTE(review): no COFF arm in this chain even though emit() resolves COFF symbols; a COFF file falls through silently — confirm intended
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode().unconditional_branch_immediate.group.op) {
+ .b => .JUMP26, // 26-bit pc-relative jump
+ .bl => .CALL26, // 26-bit pc-relative call
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type), // ELF64 r_info: symbol in high 32 bits, type in low
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .branch,
+ .meta = .{
+ .pcrel = true,
+ .has_subtractor = false,
+ .length = 2, // log2 of the relocated field width: 2^2 = 4 bytes
+ .symbolnum = @intCast(sym_index),
+ },
+ });
+ },
+ .data_processing_immediate => |decoded| if (lf.cast(.elf)) |ef| { // ADRP/ADR page relocs and ADD low-12-bit offset relocs
+ const zo = ef.zigObjectPtr().?;
+ const atom = zo.symbol(try zo.getOrCreateMetadataForNav(zcu, owner_nav)).atom(ef).?;
+ const r_type: std.elf.R_AARCH64 = switch (decoded.decode()) {
+ else => unreachable,
+ .pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
+ .adr => .ADR_PREL_LO21, // 21-bit pc-relative address
+ .adrp => .ADR_PREL_PG_HI21, // 4KiB-page-relative high bits
+ },
+ .add_subtract_immediate => |add_subtract_immediate| switch (add_subtract_immediate.group.op) {
+ .add => .ADD_ABS_LO12_NC, // low 12 bits of the absolute address, no overflow check
+ .sub => unreachable, // sub is never used for address materialization
+ },
+ };
+ try atom.addReloc(gpa, .{
+ .r_offset = offset,
+ .r_info = @as(u64, sym_index) << 32 | @intFromEnum(r_type),
+ .r_addend = @bitCast(addend),
+ }, zo);
+ } else if (lf.cast(.macho)) |mf| {
+ const zo = mf.getZigObject().?;
+ const atom = zo.symbols.items[try zo.getOrCreateMetadataForNav(mf, owner_nav)].getAtom(mf).?;
+ switch (decoded.decode()) {
+ else => unreachable,
+ .pc_relative_addressing => |pc_relative_addressing| switch (pc_relative_addressing.group.op) {
+ .adr => unreachable, // Mach-O has no ADR reloc; codegen must use ADRP+ADD
+ .adrp => try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .page, // ARM64_RELOC_PAGE21 equivalent
+ .meta = .{
+ .pcrel = true,
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ }),
+ },
+ .add_subtract_immediate => |add_subtract_immediate| switch (add_subtract_immediate.group.op) {
+ .add => try atom.addReloc(mf, .{
+ .tag = .@"extern",
+ .offset = offset,
+ .target = sym_index,
+ .addend = @bitCast(addend),
+ .type = .pageoff, // ARM64_RELOC_PAGEOFF12 equivalent
+ .meta = .{
+ .pcrel = false, // page offset is absolute within the page, not pc-relative
+ .has_subtractor = false,
+ .length = 2,
+ .symbolnum = @intCast(sym_index),
+ },
+ }),
+ .sub => unreachable,
+ },
+ }
+ },
+ }
+}
+
+const Air = @import("../../Air.zig");
+const assert = std.debug.assert;
+const mir_log = std.log.scoped(.mir);
+const Instruction = @import("encoding.zig").Instruction;
+const InternPool = @import("../../InternPool.zig");
+const link = @import("../../link.zig");
+const Mir = @This();
+const std = @import("std");
+const target_util = @import("../../target.zig");
+const Zcu = @import("../../Zcu.zig");
+const ZigType = @import("../../Type.zig");
src/codegen/aarch64/Select.zig
@@ -0,0 +1,10981 @@
+pt: Zcu.PerThread,
+target: *const std.Target,
+air: Air,
+nav_index: InternPool.Nav.Index,
+
+// Blocks
+def_order: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, void),
+blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, Block),
+loops: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, Loop),
+active_loops: std.ArrayListUnmanaged(Loop.Index),
+loop_live: struct {
+ set: std.AutoArrayHashMapUnmanaged(struct { Loop.Index, Air.Inst.Index }, void),
+ list: std.ArrayListUnmanaged(Air.Inst.Index),
+},
+dom_start: u32,
+dom_len: u32,
+dom: std.ArrayListUnmanaged(DomInt),
+
+// Wip Mir
+saved_registers: std.enums.EnumSet(Register.Alias),
+instructions: std.ArrayListUnmanaged(codegen.aarch64.encoding.Instruction),
+literals: std.ArrayListUnmanaged(u32),
+nav_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Nav),
+uav_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Uav),
+global_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Global),
+literal_relocs: std.ArrayListUnmanaged(codegen.aarch64.Mir.Reloc.Literal),
+
+// Stack Frame
+returns: bool,
+va_list: struct {
+ __stack: Value.Indirect,
+ __gr_top: Value.Indirect,
+ __vr_top: Value.Indirect,
+},
+stack_size: u24,
+stack_align: InternPool.Alignment,
+
+// Value Tracking
+live_registers: LiveRegisters,
+live_values: std.AutoHashMapUnmanaged(Air.Inst.Index, Value.Index),
+values: std.ArrayListUnmanaged(Value),
+
+pub const LiveRegisters = std.enums.EnumArray(Register.Alias, Value.Index);
+
+pub const Block = struct {
+ live_registers: LiveRegisters,
+ target_label: u32,
+
+ pub const main: Air.Inst.Index = @enumFromInt(
+ std.math.maxInt(@typeInfo(Air.Inst.Index).@"enum".tag_type),
+ );
+
+ fn branch(block: *const Block, isel: *Select) !void {
+ if (isel.instructions.items.len > block.target_label) {
+ try isel.emit(.b(@intCast((isel.instructions.items.len + 1 - block.target_label) << 2)));
+ }
+ try isel.merge(&block.live_registers, .{});
+ }
+};
+
+pub const Loop = struct {
+ def_order: u32,
+ dom: u32,
+ depth: u32,
+ live: u32,
+ live_registers: LiveRegisters,
+ repeat_list: u32,
+
+ pub const invalid: Air.Inst.Index = @enumFromInt(
+ std.math.maxInt(@typeInfo(Air.Inst.Index).@"enum".tag_type),
+ );
+
+ pub const Index = enum(u32) {
+ _,
+
+ fn inst(li: Loop.Index, isel: *Select) Air.Inst.Index {
+ return isel.loops.keys()[@intFromEnum(li)];
+ }
+
+ fn get(li: Loop.Index, isel: *Select) *Loop {
+ return &isel.loops.values()[@intFromEnum(li)];
+ }
+ };
+
+ pub const empty_list: u32 = std.math.maxInt(u32);
+
+ fn branch(loop: *Loop, isel: *Select) !void {
+ try isel.instructions.ensureUnusedCapacity(isel.pt.zcu.gpa, 1);
+ const repeat_list_tail = loop.repeat_list;
+ loop.repeat_list = @intCast(isel.instructions.items.len);
+ isel.instructions.appendAssumeCapacity(@bitCast(repeat_list_tail));
+ try isel.merge(&loop.live_registers, .{});
+ }
+};
+
+pub fn deinit(isel: *Select) void {
+ const gpa = isel.pt.zcu.gpa;
+
+ isel.def_order.deinit(gpa);
+ isel.blocks.deinit(gpa);
+ isel.loops.deinit(gpa);
+ isel.active_loops.deinit(gpa);
+ isel.loop_live.set.deinit(gpa);
+ isel.loop_live.list.deinit(gpa);
+ isel.dom.deinit(gpa);
+
+ isel.instructions.deinit(gpa);
+ isel.literals.deinit(gpa);
+ isel.nav_relocs.deinit(gpa);
+ isel.uav_relocs.deinit(gpa);
+ isel.global_relocs.deinit(gpa);
+ isel.literal_relocs.deinit(gpa);
+
+ isel.live_values.deinit(gpa);
+ isel.values.deinit(gpa);
+
+ isel.* = undefined;
+}
+
+pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
+ const air_tags = isel.air.instructions.items(.tag);
+ const air_data = isel.air.instructions.items(.data);
+ var air_body_index: usize = 0;
+ var air_inst_index = air_body[air_body_index];
+ const initial_def_order_len = isel.def_order.count();
+ air_tag: switch (air_tags[@intFromEnum(air_inst_index)]) {
+ .arg,
+ .ret_addr,
+ .frame_addr,
+ .err_return_trace,
+ .save_err_return_trace_index,
+ .runtime_nav_ptr,
+ .c_va_start,
+ => {
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .add,
+ .add_safe,
+ .add_optimized,
+ .add_wrap,
+ .add_sat,
+ .sub,
+ .sub_safe,
+ .sub_optimized,
+ .sub_wrap,
+ .sub_sat,
+ .mul,
+ .mul_safe,
+ .mul_optimized,
+ .mul_wrap,
+ .mul_sat,
+ .div_float,
+ .div_float_optimized,
+ .div_trunc,
+ .div_trunc_optimized,
+ .div_floor,
+ .div_floor_optimized,
+ .div_exact,
+ .div_exact_optimized,
+ .rem,
+ .rem_optimized,
+ .mod,
+ .mod_optimized,
+ .max,
+ .min,
+ .bit_and,
+ .bit_or,
+ .shr,
+ .shr_exact,
+ .shl,
+ .shl_exact,
+ .shl_sat,
+ .xor,
+ .cmp_lt,
+ .cmp_lt_optimized,
+ .cmp_lte,
+ .cmp_lte_optimized,
+ .cmp_eq,
+ .cmp_eq_optimized,
+ .cmp_gte,
+ .cmp_gte_optimized,
+ .cmp_gt,
+ .cmp_gt_optimized,
+ .cmp_neq,
+ .cmp_neq_optimized,
+ .bool_and,
+ .bool_or,
+ .array_elem_val,
+ .slice_elem_val,
+ .ptr_elem_val,
+ => {
+ const bin_op = air_data[@intFromEnum(air_inst_index)].bin_op;
+
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .ptr_add,
+ .ptr_sub,
+ .add_with_overflow,
+ .sub_with_overflow,
+ .mul_with_overflow,
+ .shl_with_overflow,
+ .slice,
+ .slice_elem_ptr,
+ .ptr_elem_ptr,
+ => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .alloc => {
+ const ty = air_data[@intFromEnum(air_inst_index)].ty;
+
+ isel.stack_align = isel.stack_align.maxStrict(ty.ptrAlignment(zcu));
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .inferred_alloc,
+ .inferred_alloc_comptime,
+ .wasm_memory_size,
+ .wasm_memory_grow,
+ .work_item_id,
+ .work_group_size,
+ .work_group_id,
+ => unreachable,
+ .ret_ptr => {
+ const ty = air_data[@intFromEnum(air_inst_index)].ty;
+
+ if (isel.live_values.get(Block.main)) |ret_vi| switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => isel.stack_align = isel.stack_align.maxStrict(ty.ptrAlignment(zcu)),
+ .value, .constant => unreachable,
+ .address => |address_vi| try isel.live_values.putNoClobber(gpa, air_inst_index, address_vi.ref(isel)),
+ };
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .assembly => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.Asm, ty_pl.payload);
+ const operands: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0 .. extra.data.flags.outputs_len + extra.data.inputs_len]);
+
+ for (operands) |operand| if (operand != .none) try isel.analyzeUse(operand);
+ if (ty_pl.ty != .void_type) try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .not,
+ .clz,
+ .ctz,
+ .popcount,
+ .byte_swap,
+ .bit_reverse,
+ .abs,
+ .load,
+ .fptrunc,
+ .fpext,
+ .intcast,
+ .intcast_safe,
+ .trunc,
+ .optional_payload,
+ .optional_payload_ptr,
+ .optional_payload_ptr_set,
+ .wrap_optional,
+ .unwrap_errunion_payload,
+ .unwrap_errunion_err,
+ .unwrap_errunion_payload_ptr,
+ .unwrap_errunion_err_ptr,
+ .errunion_payload_ptr_set,
+ .wrap_errunion_payload,
+ .wrap_errunion_err,
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ .get_union_tag,
+ .ptr_slice_len_ptr,
+ .ptr_slice_ptr_ptr,
+ .array_to_slice,
+ .int_from_float,
+ .int_from_float_optimized,
+ .int_from_float_safe,
+ .int_from_float_optimized_safe,
+ .float_from_int,
+ .splat,
+ .error_set_has_value,
+ .addrspace_cast,
+ .c_va_arg,
+ .c_va_copy,
+ => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .bitcast => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+ maybe_noop: {
+ if (ty_op.ty.toInterned().? != isel.air.typeOf(ty_op.operand, ip).toIntern()) break :maybe_noop;
+ if (true) break :maybe_noop;
+ if (ty_op.operand.toIndex()) |src_air_inst_index| {
+ if (isel.hints.get(src_air_inst_index)) |hint_vpsi| {
+ try isel.hints.putNoClobber(gpa, air_inst_index, hint_vpsi);
+ }
+ }
+ }
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ inline .block, .dbg_inline_block => |air_tag| {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(switch (air_tag) {
+ else => comptime unreachable,
+ .block => Air.Block,
+ .dbg_inline_block => Air.DbgInlineBlock,
+ }, ty_pl.payload);
+ const result_ty = ty_pl.ty.toInterned().?;
+
+ if (result_ty == .noreturn_type) {
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+
+ air_body_index += 1;
+ break :air_tag;
+ }
+
+ assert(!(try isel.blocks.getOrPut(gpa, air_inst_index)).found_existing);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ const block_entry = isel.blocks.pop().?;
+ assert(block_entry.key == air_inst_index);
+
+ if (result_ty != .void_type) try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .loop => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.Block, ty_pl.payload);
+
+ const initial_dom_start = isel.dom_start;
+ const initial_dom_len = isel.dom_len;
+ isel.dom_start = @intCast(isel.dom.items.len);
+ isel.dom_len = @intCast(isel.blocks.count());
+ try isel.active_loops.append(gpa, @enumFromInt(isel.loops.count()));
+ try isel.loops.putNoClobber(gpa, air_inst_index, .{
+ .def_order = @intCast(isel.def_order.count()),
+ .dom = isel.dom_start,
+ .depth = isel.dom_len,
+ .live = 0,
+ .live_registers = undefined,
+ .repeat_list = undefined,
+ });
+ try isel.dom.appendNTimes(gpa, 0, std.math.divCeil(usize, isel.dom_len, @bitSizeOf(DomInt)) catch unreachable);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ for (
+ isel.dom.items[initial_dom_start..].ptr,
+ isel.dom.items[isel.dom_start..][0 .. std.math.divCeil(usize, initial_dom_len, @bitSizeOf(DomInt)) catch unreachable],
+ ) |*initial_dom, loop_dom| initial_dom.* |= loop_dom;
+ isel.dom_start = initial_dom_start;
+ isel.dom_len = initial_dom_len;
+ assert(isel.active_loops.pop().?.inst(isel) == air_inst_index);
+
+ air_body_index += 1;
+ },
+ .repeat, .trap, .unreach => air_body_index += 1,
+ .br => {
+ const br = air_data[@intFromEnum(air_inst_index)].br;
+ const block_index = isel.blocks.getIndex(br.block_inst).?;
+ if (block_index < isel.dom_len) isel.dom.items[isel.dom_start + block_index / @bitSizeOf(DomInt)] |= @as(DomInt, 1) << @truncate(block_index);
+ try isel.analyzeUse(br.operand);
+
+ air_body_index += 1;
+ },
+ .breakpoint,
+ .dbg_stmt,
+ .dbg_empty_stmt,
+ .dbg_var_ptr,
+ .dbg_var_val,
+ .dbg_arg_inline,
+ => {
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .call,
+ .call_always_tail,
+ .call_never_tail,
+ .call_never_inline,
+ => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.Call, pl_op.payload);
+ const args: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0..extra.data.args_len]);
+ isel.saved_registers.insert(.lr);
+
+ try isel.analyzeUse(pl_op.operand);
+ var param_it: CallAbiIterator = .init;
+ for (args) |arg| {
+ const restore_values_len = isel.values.items.len;
+ defer isel.values.shrinkRetainingCapacity(restore_values_len);
+ const param_vi = try param_it.param(isel, isel.air.typeOf(arg, ip)) orelse continue;
+ const param_parent = param_vi.parent(isel);
+ switch (switch (param_parent) {
+ .unallocated, .stack_slot => param_parent,
+ .value, .constant => unreachable,
+ .address => |address_vi| address_vi.parent(isel),
+ }) {
+ .unallocated => {},
+ .stack_slot => |stack_slot| {
+ assert(stack_slot.base == .sp);
+ isel.stack_size = @max(isel.stack_size, stack_slot.offset);
+ },
+ .value, .constant, .address => unreachable,
+ }
+
+ try isel.analyzeUse(arg);
+ }
+
+ var ret_it: CallAbiIterator = .init;
+ if (try ret_it.ret(isel, isel.air.typeOfIndex(air_inst_index, ip))) |ret_vi| {
+ tracking_log.debug("${d} <- %{d}", .{ @intFromEnum(ret_vi), @intFromEnum(air_inst_index) });
+ switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ defer address_vi.deref(isel);
+ const ret_value = ret_vi.get(isel);
+ ret_value.flags.parent_tag = .unallocated;
+ ret_value.parent_payload = .{ .unallocated = {} };
+ },
+ }
+ try isel.live_values.putNoClobber(gpa, air_inst_index, ret_vi);
+
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+ }
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .sqrt,
+ .sin,
+ .cos,
+ .tan,
+ .exp,
+ .exp2,
+ .log,
+ .log2,
+ .log10,
+ .floor,
+ .ceil,
+ .round,
+ .trunc_float,
+ .neg,
+ .neg_optimized,
+ .is_null,
+ .is_non_null,
+ .is_null_ptr,
+ .is_non_null_ptr,
+ .is_err,
+ .is_non_err,
+ .is_err_ptr,
+ .is_non_err_ptr,
+ .is_named_enum_value,
+ .tag_name,
+ .error_name,
+ .cmp_lt_errors_len,
+ => {
+ const un_op = air_data[@intFromEnum(air_inst_index)].un_op;
+
+ try isel.analyzeUse(un_op);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .cmp_vector, .cmp_vector_optimized => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.VectorCmp, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.lhs);
+ try isel.analyzeUse(extra.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .cond_br => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.CondBr, pl_op.payload);
+
+ try isel.analyzeUse(pl_op.operand);
+
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.then_body_len]));
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
+
+ air_body_index += 1;
+ },
+ .switch_br => {
+ const switch_br = isel.air.unwrapSwitch(air_inst_index);
+
+ try isel.analyzeUse(switch_br.operand);
+
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |case| try isel.analyze(case.body);
+ if (switch_br.else_body_len > 0) try isel.analyze(cases_it.elseBody());
+
+ air_body_index += 1;
+ },
+ .loop_switch_br => {
+ const switch_br = isel.air.unwrapSwitch(air_inst_index);
+
+ const initial_dom_start = isel.dom_start;
+ const initial_dom_len = isel.dom_len;
+ isel.dom_start = @intCast(isel.dom.items.len);
+ isel.dom_len = @intCast(isel.blocks.count());
+ try isel.active_loops.append(gpa, @enumFromInt(isel.loops.count()));
+ try isel.loops.putNoClobber(gpa, air_inst_index, .{
+ .def_order = @intCast(isel.def_order.count()),
+ .dom = isel.dom_start,
+ .depth = isel.dom_len,
+ .live = 0,
+ .live_registers = undefined,
+ .repeat_list = undefined,
+ });
+ try isel.dom.appendNTimes(gpa, 0, std.math.divCeil(usize, isel.dom_len, @bitSizeOf(DomInt)) catch unreachable);
+
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |case| try isel.analyze(case.body);
+ if (switch_br.else_body_len > 0) try isel.analyze(cases_it.elseBody());
+
+ for (
+ isel.dom.items[initial_dom_start..].ptr,
+ isel.dom.items[isel.dom_start..][0 .. std.math.divCeil(usize, initial_dom_len, @bitSizeOf(DomInt)) catch unreachable],
+ ) |*initial_dom, loop_dom| initial_dom.* |= loop_dom;
+ isel.dom_start = initial_dom_start;
+ isel.dom_len = initial_dom_len;
+ assert(isel.active_loops.pop().?.inst(isel) == air_inst_index);
+
+ air_body_index += 1;
+ },
+ .switch_dispatch => {
+ const br = air_data[@intFromEnum(air_inst_index)].br;
+
+ try isel.analyzeUse(br.operand);
+
+ air_body_index += 1;
+ },
+ .@"try", .try_cold, .try_ptr, .try_ptr_cold => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.Try, pl_op.payload);
+
+ try isel.analyzeUse(pl_op.operand);
+ try isel.analyze(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .ret, .ret_safe, .ret_load => {
+ const un_op = air_data[@intFromEnum(air_inst_index)].un_op;
+ isel.returns = true;
+
+ const block_index = 0;
+ assert(isel.blocks.keys()[block_index] == Block.main);
+ if (isel.dom_len > 0) isel.dom.items[isel.dom_start] |= 1 << block_index;
+
+ try isel.analyzeUse(un_op);
+
+ air_body_index += 1;
+ },
+ .store,
+ .store_safe,
+ .set_union_tag,
+ .memset,
+ .memset_safe,
+ .memcpy,
+ .memmove,
+ .atomic_store_unordered,
+ .atomic_store_monotonic,
+ .atomic_store_release,
+ .atomic_store_seq_cst,
+ => {
+ const bin_op = air_data[@intFromEnum(air_inst_index)].bin_op;
+
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .struct_field_ptr, .struct_field_val => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.struct_operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .slice_len => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ const slice_vi = try isel.use(ty_op.operand);
+ var len_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 8, 8);
+ if (try len_part_it.only(isel)) |len_part_vi|
+ try isel.live_values.putNoClobber(gpa, air_inst_index, len_part_vi.ref(isel));
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .slice_ptr => {
+ const ty_op = air_data[@intFromEnum(air_inst_index)].ty_op;
+
+ try isel.analyzeUse(ty_op.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ const slice_vi = try isel.use(ty_op.operand);
+ var ptr_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 0, 8);
+ if (try ptr_part_it.only(isel)) |ptr_part_vi|
+ try isel.live_values.putNoClobber(gpa, air_inst_index, ptr_part_vi.ref(isel));
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .reduce, .reduce_optimized => {
+ const reduce = air_data[@intFromEnum(air_inst_index)].reduce;
+
+ try isel.analyzeUse(reduce.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .shuffle_one => {
+ const extra = isel.air.unwrapShuffleOne(zcu, air_inst_index);
+
+ try isel.analyzeUse(extra.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .shuffle_two => {
+ const extra = isel.air.unwrapShuffleTwo(zcu, air_inst_index);
+
+ try isel.analyzeUse(extra.operand_a);
+ try isel.analyzeUse(extra.operand_b);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .select, .mul_add => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const bin_op = isel.air.extraData(Air.Bin, pl_op.payload).data;
+
+ try isel.analyzeUse(pl_op.operand);
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .cmpxchg_weak, .cmpxchg_strong => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.ptr);
+ try isel.analyzeUse(extra.expected_value);
+ try isel.analyzeUse(extra.new_value);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .atomic_load => {
+ const atomic_load = air_data[@intFromEnum(air_inst_index)].atomic_load;
+
+ try isel.analyzeUse(atomic_load.ptr);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .atomic_rmw => {
+ const pl_op = air_data[@intFromEnum(air_inst_index)].pl_op;
+ const extra = isel.air.extraData(Air.AtomicRmw, pl_op.payload).data;
+
+ try isel.analyzeUse(extra.operand);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .aggregate_init => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const elements: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[ty_pl.payload..][0..@intCast(ty_pl.ty.toType().arrayLen(zcu))]);
+
+ for (elements) |element| try isel.analyzeUse(element);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .union_init => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.UnionInit, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.init);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .prefetch => {
+ const prefetch = air_data[@intFromEnum(air_inst_index)].prefetch;
+
+ try isel.analyzeUse(prefetch.ptr);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .field_parent_ptr => {
+ const ty_pl = air_data[@intFromEnum(air_inst_index)].ty_pl;
+ const extra = isel.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+
+ try isel.analyzeUse(extra.field_ptr);
+ try isel.def_order.putNoClobber(gpa, air_inst_index, {});
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .set_err_return_trace, .c_va_end => {
+ const un_op = air_data[@intFromEnum(air_inst_index)].un_op;
+
+ try isel.analyzeUse(un_op);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ .vector_store_elem => {
+ const vector_store_elem = air_data[@intFromEnum(air_inst_index)].vector_store_elem;
+ const bin_op = isel.air.extraData(Air.Bin, vector_store_elem.payload).data;
+
+ try isel.analyzeUse(vector_store_elem.vector_ptr);
+ try isel.analyzeUse(bin_op.lhs);
+ try isel.analyzeUse(bin_op.rhs);
+
+ air_body_index += 1;
+ air_inst_index = air_body[air_body_index];
+ continue :air_tag air_tags[@intFromEnum(air_inst_index)];
+ },
+ }
+ assert(air_body_index == air_body.len);
+ isel.def_order.shrinkRetainingCapacity(initial_def_order_len);
+}
+
+fn analyzeUse(isel: *Select, air_ref: Air.Inst.Ref) !void {
+ const air_inst_index = air_ref.toIndex() orelse return;
+ const def_order_index = isel.def_order.getIndex(air_inst_index).?;
+
+ // Loop liveness
+ var active_loop_index = isel.active_loops.items.len;
+ while (active_loop_index > 0) {
+ const prev_active_loop_index = active_loop_index - 1;
+ const active_loop = isel.active_loops.items[prev_active_loop_index];
+ if (def_order_index >= active_loop.get(isel).def_order) break;
+ active_loop_index = prev_active_loop_index;
+ }
+ if (active_loop_index < isel.active_loops.items.len) {
+ const active_loop = isel.active_loops.items[active_loop_index];
+ const loop_live_gop =
+ try isel.loop_live.set.getOrPut(isel.pt.zcu.gpa, .{ active_loop, air_inst_index });
+ if (!loop_live_gop.found_existing) active_loop.get(isel).live += 1;
+ }
+}
+
+pub fn finishAnalysis(isel: *Select) !void {
+ const gpa = isel.pt.zcu.gpa;
+
+ // Loop Liveness
+ if (isel.loops.count() > 0) {
+ try isel.loops.ensureUnusedCapacity(gpa, 1);
+
+ const loop_live_len: u32 = @intCast(isel.loop_live.set.count());
+ if (loop_live_len > 0) {
+ try isel.loop_live.list.resize(gpa, loop_live_len);
+
+ const loops = isel.loops.values();
+ for (loops[1..], loops[0 .. loops.len - 1]) |*loop, prev_loop| loop.live += prev_loop.live;
+ assert(loops[loops.len - 1].live == loop_live_len);
+
+ for (isel.loop_live.set.keys()) |entry| {
+ const loop, const inst = entry;
+ const loop_live = &loop.get(isel).live;
+ loop_live.* -= 1;
+ isel.loop_live.list.items[loop_live.*] = inst;
+ }
+ assert(loops[0].live == 0);
+ }
+
+ const invalid_gop = isel.loops.getOrPutAssumeCapacity(Loop.invalid);
+ assert(!invalid_gop.found_existing);
+ invalid_gop.value_ptr.live = loop_live_len;
+ }
+}
+
+pub fn body(isel: *Select, air_body: []const Air.Inst.Index) !void {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ const gpa = zcu.gpa;
+
+ {
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
+ _ => {
+ const ra = &live_reg_entry.value.get(isel).location_payload.small.register;
+ assert(ra.* == live_reg_entry.key);
+ ra.* = .zr;
+ live_reg_entry.value.* = .free;
+ },
+ .allocating => live_reg_entry.value.* = .free,
+ .free => {},
+ };
+ }
+
+ var air: struct {
+ isel: *Select,
+ tag_items: []const Air.Inst.Tag,
+ data_items: []const Air.Inst.Data,
+ body: []const Air.Inst.Index,
+ body_index: u32,
+ inst_index: Air.Inst.Index,
+
+ fn tag(it: *@This(), inst_index: Air.Inst.Index) Air.Inst.Tag {
+ return it.tag_items[@intFromEnum(inst_index)];
+ }
+
+ fn data(it: *@This(), inst_index: Air.Inst.Index) Air.Inst.Data {
+ return it.data_items[@intFromEnum(inst_index)];
+ }
+
+ fn next(it: *@This()) ?Air.Inst.Tag {
+ if (it.body_index == 0) {
+ @branchHint(.unlikely);
+ return null;
+ }
+ it.body_index -= 1;
+ it.inst_index = it.body[it.body_index];
+ wip_mir_log.debug("{f}", .{it.fmtAir(it.inst_index)});
+ return it.tag(it.inst_index);
+ }
+
+ fn fmtAir(it: @This(), inst: Air.Inst.Index) struct {
+ isel: *Select,
+ inst: Air.Inst.Index,
+ pub fn format(fmt_air: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
+ fmt_air.isel.air.writeInst(writer, fmt_air.inst, fmt_air.isel.pt, null);
+ }
+ } {
+ return .{ .isel = it.isel, .inst = inst };
+ }
+ } = .{
+ .isel = isel,
+ .tag_items = isel.air.instructions.items(.tag),
+ .data_items = isel.air.instructions.items(.data),
+ .body = air_body,
+ .body_index = @intCast(air_body.len),
+ .inst_index = undefined,
+ };
+ air_tag: switch (air.next().?) {
+ else => |air_tag| return isel.fail("unimplemented {s}", .{@tagName(air_tag)}),
+ .arg => {
+ const arg_vi = isel.live_values.fetchRemove(air.inst_index).?.value;
+ defer arg_vi.deref(isel);
+ switch (arg_vi.parent(isel)) {
+ .unallocated, .stack_slot => if (arg_vi.hint(isel)) |arg_ra| {
+ try arg_vi.defLiveIn(isel, arg_ra, comptime &.initFill(.free));
+ } else {
+ var arg_part_it = arg_vi.parts(isel);
+ while (arg_part_it.next()) |arg_part| {
+ try arg_part.defLiveIn(isel, arg_part.hint(isel).?, comptime &.initFill(.free));
+ }
+ },
+ .value, .constant => unreachable,
+ .address => |address_vi| try address_vi.defLiveIn(isel, address_vi.hint(isel).?, comptime &.initFill(.free)),
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .add, .add_optimized, .add_wrap, .sub, .sub_optimized, .sub_wrap => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) try res_vi.value.addOrSubtract(isel, ty, try isel.use(bin_op.lhs), switch (air_tag) {
+ else => unreachable,
+ .add, .add_wrap => .add,
+ .sub, .sub_wrap => .sub,
+ }, try isel.use(bin_op.rhs), .{ .wrap = switch (air_tag) {
+ else => unreachable,
+ .add, .sub => false,
+ .add_wrap, .sub_wrap => true,
+ } }) else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => .fadd(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ .sub, .sub_optimized => .fsub(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ },
+ 32 => switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => .fadd(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ .sub, .sub_optimized => .fsub(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ },
+ 64 => switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => .fadd(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ .sub, .sub_optimized => .fsub(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ },
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (air_tag) {
+ else => unreachable,
+ .add, .add_optimized => switch (bits) {
+ else => unreachable,
+ 16 => "__addhf3",
+ 32 => "__addsf3",
+ 64 => "__adddf3",
+ 80 => "__addxf3",
+ 128 => "__addtf3",
+ },
+ .sub, .sub_optimized => switch (bits) {
+ else => unreachable,
+ 16 => "__subhf3",
+ 32 => "__subsf3",
+ 64 => "__subdf3",
+ 80 => "__subxf3",
+ 128 => "__subtf3",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // Saturating integer add/sub (`add_sat`/`sub_sat`).
+ // Only unsigned 32- and 64-bit operands are lowered inline; signed types and
+ // other widths report a selection failure instead.
+ .add_sat, .sub_sat => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 32, 64 => |bits| switch (int_info.signedness) {
+ .signed => return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ .unsigned => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // Scratch register holding the unsaturated (wrapping) result.
+ const unsat_res_ra = try isel.allocIntReg();
+ defer isel.freeReg(unsat_res_ra);
+ // NOTE(review): `isel.emit` appears to append instructions in reverse
+ // execution order -- the conditional select that consumes the flags is
+ // emitted before the flag-setting adds/subs. TODO confirm against the
+ // emit implementation.
+ switch (air_tag) {
+ else => unreachable,
+ // add: `adds` sets C on unsigned carry-out; `csinv` with the inverted
+ // `.cs` condition keeps the raw sum when no carry occurred, otherwise
+ // yields ~zr (all ones, the unsigned maximum).
+ .add_sat => switch (bits) {
+ else => unreachable,
+ 32 => {
+ try isel.emit(.csinv(res_ra.w(), unsat_res_ra.w(), .wzr, .invert(.cs)));
+ try isel.emit(.adds(unsat_res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ 64 => {
+ try isel.emit(.csinv(res_ra.x(), unsat_res_ra.x(), .xzr, .invert(.cs)));
+ try isel.emit(.adds(unsat_res_ra.x(), lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ },
+ },
+ // sub: `subs` clears C on unsigned borrow; `csel` with the inverted
+ // `.cc` condition keeps the raw difference when no borrow occurred,
+ // otherwise yields zr (zero, the unsigned minimum).
+ .sub_sat => switch (bits) {
+ else => unreachable,
+ 32 => {
+ try isel.emit(.csel(res_ra.w(), unsat_res_ra.w(), .wzr, .invert(.cc)));
+ try isel.emit(.subs(unsat_res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ 64 => {
+ try isel.emit(.csel(res_ra.x(), unsat_res_ra.x(), .xzr, .invert(.cc)));
+ try isel.emit(.subs(unsat_res_ra.x(), lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ },
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // Integer and float multiplication (`mul`/`mul_optimized`/`mul_wrap`).
+ // Integers: 1-bit is a bitwise and, <=64-bit uses madd, <=128-bit builds the
+ // product from two 64-bit limbs. Floats: 16/32/64-bit inline (f16 widened to
+ // f32 without the fullfp16 feature), 80/128-bit via compiler-rt libcalls.
+ // Instructions are emitted in reverse execution order throughout.
+ .mul, .mul_optimized, .mul_wrap => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (int_info.signedness) {
+ .signed => switch (air_tag) {
+ else => unreachable,
+ // i1 `mul`: overflow is illegal behavior, so every defined
+ // product is 0; materialize the constant and skip the operands.
+ .mul => break :unused try isel.emit(.orr(res_ra.w(), .wzr, .{ .register = .wzr })),
+ .mul_wrap => {},
+ },
+ .unsigned => {},
+ }
+ // 1-bit wrapping/unsigned multiply is exactly a bitwise and.
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.@"and"(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 2...32 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (air_tag) {
+ else => unreachable,
+ // Plain `mul` may not overflow, so no truncation is required.
+ .mul => {},
+ // `mul_wrap`: truncate the 32-bit product back to `bits` bits by
+ // sign/zero-extending from bit `bits - 1` (runs after the madd;
+ // emission order is reversed).
+ .mul_wrap => switch (bits) {
+ else => unreachable,
+ 1...31 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 32 => {},
+ },
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.madd(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), .wzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 33...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (air_tag) {
+ else => unreachable,
+ .mul => {},
+ .mul_wrap => switch (bits) {
+ else => unreachable,
+ 33...63 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 64 => {},
+ },
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.madd(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ // Wide multiply: the value lives in two 64-bit limbs (lo at offset 0,
+ // hi at offset 8). Each limb is only computed when its register is live.
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ const res_hi64_ra = try res_hi64_vi.?.defReg(isel);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ const res_lo64_ra = try res_lo64_vi.?.defReg(isel);
+ if (res_hi64_ra == null and res_lo64_ra == null) break :unused;
+ if (res_hi64_ra) |res_ra| switch (air_tag) {
+ else => unreachable,
+ .mul => {},
+ .mul_wrap => switch (bits) {
+ else => unreachable,
+ // Truncate the 128-bit product to `bits` bits: the hi limb keeps
+ // its low `bits - 64` bits, sign/zero-extended from bit
+ // `bits - 64 - 1`. (The previous `bits - 1` was wrong here and
+ // cannot even be encoded in the 6-bit `imms` field once
+ // bits > 64.)
+ 65...127 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 64 - 1),
+ }),
+ }),
+ 128 => {},
+ },
+ };
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_lo64_mat, const rhs_lo64_mat = lo64_mat: {
+ // NOTE(review): presumably locks the hi-result register so that
+ // materializing the lo operands cannot grab it -- TODO confirm.
+ const res_hi64_lock: RegLock = if (res_hi64_ra != null and res_lo64_ra != null)
+ isel.lockReg(res_hi64_ra.?)
+ else
+ .empty;
+ defer res_hi64_lock.unlock(isel);
+
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ const rhs_lo64_mat = try rhs_lo64_vi.?.matReg(isel);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ const lhs_lo64_mat = try lhs_lo64_vi.?.matReg(isel);
+ break :lo64_mat .{ lhs_lo64_mat, rhs_lo64_mat };
+ };
+ // lo = low 64 bits of lhs.lo * rhs.lo.
+ if (res_lo64_ra) |res_ra| try isel.emit(.madd(res_ra.x(), lhs_lo64_mat.ra.x(), rhs_lo64_mat.ra.x(), .xzr));
+ if (res_hi64_ra) |res_ra| {
+ var rhs_hi64_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi64_vi = try rhs_hi64_it.only(isel);
+ const rhs_hi64_mat = try rhs_hi64_vi.?.matReg(isel);
+ var lhs_hi64_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi64_vi = try lhs_hi64_it.only(isel);
+ const lhs_hi64_mat = try lhs_hi64_vi.?.matReg(isel);
+ const acc_ra = try isel.allocIntReg();
+ defer isel.freeReg(acc_ra);
+ // hi = umulh(lhs.lo, rhs.lo) + lhs.lo * rhs.hi + lhs.hi * rhs.lo
+ // (schoolbook multiply; the three instructions below are emitted in
+ // reverse execution order).
+ try isel.emit(.madd(res_ra.x(), lhs_hi64_mat.ra.x(), rhs_lo64_mat.ra.x(), acc_ra.x()));
+ try isel.emit(.madd(acc_ra.x(), lhs_lo64_mat.ra.x(), rhs_hi64_mat.ra.x(), acc_ra.x()));
+ try isel.emit(.umulh(acc_ra.x(), lhs_lo64_mat.ra.x(), rhs_lo64_mat.ra.x()));
+ try rhs_hi64_mat.finish(isel);
+ try lhs_hi64_mat.finish(isel);
+ }
+ try rhs_lo64_mat.finish(isel);
+ try lhs_lo64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ // Without the fullfp16 feature, f16 arithmetic is done in f32:
+ // widen the operands with fcvt, fmul in s-registers, narrow the
+ // result back down (all emitted in reverse execution order).
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fmul(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ 32 => .fmul(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ 64 => .fmul(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ // f80/f128 multiply lowers to a compiler-rt libcall. f80 is passed
+ // and returned in GPR pairs (r0/r1, r2/r3), f128 in v0/v1. `bits`
+ // is only ever 80 or 128 here; the 16/32/64 arms are dead but keep
+ // the switch shape shared with the other float libcall paths.
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__mulhf3",
+ 32 => "__mulsf3",
+ 64 => "__muldf3",
+ 80 => "__mulxf3",
+ 128 => "__multf3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // Saturating integer multiplication (`mul_sat`), integers only.
+ // Strategy: compute a widened product, detect overflow by comparing the
+ // product against its truncation, and conditionally select a precomputed
+ // saturation value. Widths above 64 bits are unsupported.
+ // NOTE(review): `isel.emit` appears to append instructions in reverse
+ // execution order (selects are emitted before the compares feeding them).
+ .mul_sat => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ switch (int_info.signedness) {
+ // i1 values are 0 and -1: (-1)*(-1) = 1 saturates to maxInt(i1) = 0
+ // and every other product is 0, so the result is always 0.
+ .signed => try isel.emit(.orr(res_ra.w(), .wzr, .{ .register = .wzr })),
+ // u1 multiply never overflows and is exactly a bitwise and.
+ .unsigned => {
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ try isel.emit(.@"and"(res_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ }
+ },
+ 2...32 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ // Saturation value register; for u32 the all-ones maximum can be
+ // produced by csinv from zr, so no register is needed.
+ const saturated_ra = switch (int_info.signedness) {
+ .signed => try isel.allocIntReg(),
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 2...31 => try isel.allocIntReg(),
+ 32 => .zr,
+ },
+ };
+ defer if (saturated_ra != .zr) isel.freeReg(saturated_ra);
+ // Widened (non-truncated) product.
+ const unwrapped_ra = try isel.allocIntReg();
+ defer isel.freeReg(unwrapped_ra);
+ // Final select (runs last): `.eq` means the overflow compare below
+ // found no overflow, so keep the raw product; otherwise take the
+ // saturation value (~zr via csinv in the u32 case).
+ try isel.emit(switch (saturated_ra) {
+ else => .csel(res_ra.w(), unwrapped_ra.w(), saturated_ra.w(), .eq),
+ .zr => .csinv(res_ra.w(), unwrapped_ra.w(), saturated_ra.w(), .eq),
+ });
+ // Overflow detection: compare the widened product with its
+ // truncation to `bits` bits (sign-extension compare for signed,
+ // high-bit test for unsigned; 8/16/32 use extended-register subs).
+ switch (bits) {
+ else => unreachable,
+ 2...7, 9...15, 17...31 => switch (int_info.signedness) {
+ .signed => {
+ const wrapped_ra = try isel.allocIntReg();
+ defer isel.freeReg(wrapped_ra);
+ switch (bits) {
+ else => unreachable,
+ 1...7, 9...15 => {
+ try isel.emit(.subs(.wzr, unwrapped_ra.w(), .{ .register = wrapped_ra.w() }));
+ try isel.emit(.sbfm(wrapped_ra.w(), unwrapped_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ 17...31 => {
+ try isel.emit(.subs(.xzr, unwrapped_ra.x(), .{ .register = wrapped_ra.x() }));
+ try isel.emit(.sbfm(wrapped_ra.x(), unwrapped_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ }
+ },
+ // Unsigned: overflow iff any bit above `bits` is set.
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 1...7, 9...15 => try isel.emit(.ands(.wzr, unwrapped_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = @intCast(32 - bits - 1),
+ } })),
+ 17...31 => try isel.emit(.ands(.xzr, unwrapped_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = @intCast(64 - bits - 1),
+ } })),
+ },
+ },
+ 8 => try isel.emit(.subs(.wzr, unwrapped_ra.w(), .{ .extended_register = .{
+ .register = unwrapped_ra.w(),
+ .extend = switch (int_info.signedness) {
+ .signed => .{ .sxtb = 0 },
+ .unsigned => .{ .uxtb = 0 },
+ },
+ } })),
+ 16 => try isel.emit(.subs(.wzr, unwrapped_ra.w(), .{ .extended_register = .{
+ .register = unwrapped_ra.w(),
+ .extend = switch (int_info.signedness) {
+ .signed => .{ .sxth = 0 },
+ .unsigned => .{ .uxth = 0 },
+ },
+ } })),
+ 32 => try isel.emit(.subs(.xzr, unwrapped_ra.x(), .{ .extended_register = .{
+ .register = unwrapped_ra.w(),
+ .extend = switch (int_info.signedness) {
+ .signed => .{ .sxtw = 0 },
+ .unsigned => .{ .uxtw = 0 },
+ },
+ } })),
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // Build the saturation value (executed before the compares above):
+ // signed -> replicate sign(lhs ^ rhs), then xor with the maxInt
+ // bit pattern, yielding maxInt when signs agree and minInt when
+ // they differ; unsigned -> maxInt for the width.
+ switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.eor(saturated_ra.w(), saturated_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1 - 1),
+ } }));
+ try isel.emit(.sbfm(saturated_ra.w(), saturated_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(bits - 1),
+ .imms = @intCast(bits - 1 + 1 - 1),
+ }));
+ try isel.emit(.eor(saturated_ra.w(), lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 2...31 => try isel.movImmediate(saturated_ra.w(), @as(u32, std.math.maxInt(u32)) >> @intCast(32 - bits)),
+ 32 => {},
+ },
+ }
+ // The widened product itself (executed first): plain madd fits in
+ // 32 bits for operands up to 16 bits; wider operands use the
+ // 32x32->64 smaddl/umaddl forms.
+ switch (bits) {
+ else => unreachable,
+ 2...16 => try isel.emit(.madd(unwrapped_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), .wzr)),
+ 17...32 => switch (int_info.signedness) {
+ .signed => try isel.emit(.smaddl(unwrapped_ra.x(), lhs_mat.ra.w(), rhs_mat.ra.w(), .xzr)),
+ .unsigned => try isel.emit(.umaddl(unwrapped_ra.x(), lhs_mat.ra.w(), rhs_mat.ra.w(), .xzr)),
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 33...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ // Same scheme as above, but the 128-bit product is held in a
+ // lo64/hi64 register pair (madd + smulh/umulh).
+ const saturated_ra = switch (int_info.signedness) {
+ .signed => try isel.allocIntReg(),
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 33...63 => try isel.allocIntReg(),
+ 64 => .zr,
+ },
+ };
+ defer if (saturated_ra != .zr) isel.freeReg(saturated_ra);
+ const unwrapped_lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(unwrapped_lo64_ra);
+ const unwrapped_hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(unwrapped_hi64_ra);
+ try isel.emit(switch (saturated_ra) {
+ else => .csel(res_ra.x(), unwrapped_lo64_ra.x(), saturated_ra.x(), .eq),
+ .zr => .csinv(res_ra.x(), unwrapped_lo64_ra.x(), saturated_ra.x(), .eq),
+ });
+ switch (int_info.signedness) {
+ // Signed overflow iff hi64 != asr(lo64, 63) or (for bits < 64)
+ // lo64 differs from its own sign-extension; the ccmp chains the
+ // two tests, forcing "ne" flags when the first already failed.
+ .signed => switch (bits) {
+ else => unreachable,
+ 32...63 => {
+ const wrapped_lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(wrapped_lo64_ra);
+ try isel.emit(.ccmp(
+ unwrapped_lo64_ra.x(),
+ .{ .register = wrapped_lo64_ra.x() },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .eq,
+ ));
+ try isel.emit(.subs(.xzr, unwrapped_hi64_ra.x(), .{ .shifted_register = .{
+ .register = unwrapped_lo64_ra.x(),
+ .shift = .{ .asr = 63 },
+ } }));
+ try isel.emit(.sbfm(wrapped_lo64_ra.x(), unwrapped_lo64_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ 64 => try isel.emit(.subs(.xzr, unwrapped_hi64_ra.x(), .{ .shifted_register = .{
+ .register = unwrapped_lo64_ra.x(),
+ .shift = .{ .asr = @intCast(bits - 1) },
+ } })),
+ },
+ // Unsigned overflow iff hi64 != 0 or (for bits < 64) any lo64
+ // bit at or above `bits` is set.
+ .unsigned => switch (bits) {
+ else => unreachable,
+ 32...63 => {
+ const overflow_ra = try isel.allocIntReg();
+ defer isel.freeReg(overflow_ra);
+ try isel.emit(.subs(.xzr, overflow_ra.x(), .{ .immediate = 0 }));
+ try isel.emit(.orr(overflow_ra.x(), unwrapped_hi64_ra.x(), .{ .shifted_register = .{
+ .register = unwrapped_lo64_ra.x(),
+ .shift = .{ .lsr = @intCast(bits) },
+ } }));
+ },
+ 64 => try isel.emit(.subs(.xzr, unwrapped_hi64_ra.x(), .{ .immediate = 0 })),
+ },
+ }
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.eor(saturated_ra.x(), saturated_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1 - 1),
+ } }));
+ try isel.emit(.sbfm(saturated_ra.x(), saturated_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(bits - 1),
+ .imms = @intCast(bits - 1 + 1 - 1),
+ }));
+ try isel.emit(.eor(saturated_ra.x(), lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ try isel.emit(.madd(unwrapped_lo64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try isel.emit(.smulh(unwrapped_hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ },
+ .unsigned => {
+ switch (bits) {
+ else => unreachable,
+ 32...63 => try isel.movImmediate(saturated_ra.x(), @as(u64, std.math.maxInt(u64)) >> @intCast(64 - bits)),
+ 64 => {},
+ }
+ try isel.emit(.madd(unwrapped_lo64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), .xzr));
+ try isel.emit(.umulh(unwrapped_hi64_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()));
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // Float division (`div_float`/`div_float_optimized`).
+ // 16/32/64-bit divide inline with fdiv (f16 widened to f32 when the CPU
+ // lacks fullfp16); 80/128-bit lower to compiler-rt __divxf3/__divtf3.
+ .div_float, .div_float_optimized => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ // NOTE(review): instructions appear to be emitted in reverse
+ // execution order -- the narrowing fcvt of the result is emitted
+ // first, the widening fcvts of the operands last.
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // When widening, the f32 temporaries need their own vector regs.
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fdiv(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ 32 => .fdiv(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ 64 => .fdiv(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ // Libcall path; f80 values travel in GPR pairs, f128 in v0/v1.
+ // `bits` is only 80 or 128 here -- the 16/32/64 arms are dead but
+ // keep the switch shape shared with the other float libcall paths.
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__divhf3",
+ 32 => "__divsf3",
+ 64 => "__divdf3",
+ 80 => "__divxf3",
+ 128 => "__divtf3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // Truncating / flooring / exact division.
+ // Integers: <=64-bit use sdiv/udiv (which truncate toward zero); signed
+ // div_floor additionally adjusts the quotient. 65..128-bit call __divti3 /
+ // __udivti3 (valid for div_trunc and div_exact, and for unsigned div_floor,
+ // which equals div_trunc; signed 128-bit div_floor is rejected).
+ // Floats: inline fdiv + frintz/frintm, or libcalls for f80/f128.
+ .div_trunc, .div_trunc_optimized, .div_floor, .div_floor_optimized, .div_exact, .div_exact_optimized => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // NOTE(review): the `_optimized` tags land in `else => unreachable`
+ // below -- presumably they are only ever produced for float
+ // operands; confirm against the AIR producer.
+ const div_ra = div_ra: switch (air_tag) {
+ else => unreachable,
+ // sdiv/udiv already truncate toward zero.
+ .div_trunc, .div_exact => res_ra,
+ .div_floor => switch (int_info.signedness) {
+ // Signed floor = trunc - 1 when the remainder is nonzero
+ // and rem/rhs have opposite signs. Executed order (emission
+ // is reversed): msub computes rem; subs tests rem == 0;
+ // eor puts sign(rem ^ rhs) in rem; ccmp tests that sign only
+ // when rem != 0 (otherwise forces "ge"); csinc materializes
+ // the 0/1 adjustment; sub applies it to the quotient.
+ .signed => {
+ const div_ra = try isel.allocIntReg();
+ errdefer isel.freeReg(div_ra);
+ const rem_ra = try isel.allocIntReg();
+ defer isel.freeReg(rem_ra);
+ switch (bits) {
+ else => unreachable,
+ 1...32 => {
+ try isel.emit(.sub(res_ra.w(), div_ra.w(), .{ .register = rem_ra.w() }));
+ try isel.emit(.csinc(rem_ra.w(), .wzr, .wzr, .ge));
+ try isel.emit(.ccmp(
+ rem_ra.w(),
+ .{ .immediate = 0 },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .ne,
+ ));
+ try isel.emit(.eor(rem_ra.w(), rem_ra.w(), .{ .register = rhs_mat.ra.w() }));
+ try isel.emit(.subs(.wzr, rem_ra.w(), .{ .immediate = 0 }));
+ try isel.emit(.msub(rem_ra.w(), div_ra.w(), rhs_mat.ra.w(), lhs_mat.ra.w()));
+ },
+ 33...64 => {
+ try isel.emit(.sub(res_ra.x(), div_ra.x(), .{ .register = rem_ra.x() }));
+ try isel.emit(.csinc(rem_ra.x(), .xzr, .xzr, .ge));
+ try isel.emit(.ccmp(
+ rem_ra.x(),
+ .{ .immediate = 0 },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .ne,
+ ));
+ try isel.emit(.eor(rem_ra.x(), rem_ra.x(), .{ .register = rhs_mat.ra.x() }));
+ try isel.emit(.subs(.xzr, rem_ra.x(), .{ .immediate = 0 }));
+ try isel.emit(.msub(rem_ra.x(), div_ra.x(), rhs_mat.ra.x(), lhs_mat.ra.x()));
+ },
+ }
+ break :div_ra div_ra;
+ },
+ // Unsigned floor division equals truncating division.
+ .unsigned => res_ra,
+ },
+ };
+ defer if (div_ra != res_ra) isel.freeReg(div_ra);
+ // The division itself (executed first).
+ try isel.emit(switch (bits) {
+ else => unreachable,
+ 1...32 => switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ .unsigned => .udiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ },
+ 33...64 => switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ .unsigned => .udiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ },
+ });
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_exact => {},
+ .div_floor => switch (int_info.signedness) {
+ // __divti3 truncates; the signed floor adjustment is not
+ // implemented for 128-bit operands.
+ .signed => return isel.fail("unimplemented {s}", .{@tagName(air_tag)}),
+ .unsigned => {},
+ },
+ }
+
+ // Result comes back in the r0/r1 GPR pair.
+ try call.prepareReturn(isel);
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ try call.returnLiveIn(isel, res_hi64_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (int_info.signedness) {
+ .signed => "__divti3",
+ .unsigned => "__udivti3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ // Arguments go out in r0..r3 (lhs in r0/r1, rhs in r2/r3).
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ var rhs_hi64_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi64_vi = try rhs_hi64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi64_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi64_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi64_vi = try lhs_hi64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi64_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ try call.finishParams(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ // NOTE(review): emission appears reversed -- the rounding frint*
+ // is emitted before the fdiv it consumes.
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ // Divide, then round: frintz (toward zero) for div_trunc, frintm
+ // (toward -inf) for div_floor; div_exact needs no rounding.
+ bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => try isel.emit(.frintz(res_ra.h(), res_ra.h())),
+ .div_floor, .div_floor_optimized => try isel.emit(.frintm(res_ra.h(), res_ra.h())),
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.emit(.fdiv(res_ra.h(), lhs_ra.h(), rhs_ra.h()));
+ },
+ 32 => {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => try isel.emit(.frintz(res_ra.s(), res_ra.s())),
+ .div_floor, .div_floor_optimized => try isel.emit(.frintm(res_ra.s(), res_ra.s())),
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.emit(.fdiv(res_ra.s(), lhs_ra.s(), rhs_ra.s()));
+ },
+ 64 => {
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => try isel.emit(.frintz(res_ra.d(), res_ra.d())),
+ .div_floor, .div_floor_optimized => try isel.emit(.frintm(res_ra.d(), res_ra.d())),
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.emit(.fdiv(res_ra.d(), lhs_ra.d(), rhs_ra.d()));
+ },
+ }
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ // Libcall path; f80 travels in GPR pairs, f128 in v0/v1. `bits`
+ // is only 80 or 128 here; the other arms keep the shared shape.
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ // Two calls when rounding is needed: the trunc/floor call is
+ // emitted first but executes *after* the division call (reversed
+ // emission), rounding the division's result in place.
+ try call.prepareCallee(isel);
+ switch (air_tag) {
+ else => unreachable,
+ .div_trunc, .div_trunc_optimized => {
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__trunch",
+ 32 => "truncf",
+ 64 => "trunc",
+ 80 => "__truncx",
+ 128 => "truncq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ },
+ .div_floor, .div_floor_optimized => {
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__floorh",
+ 32 => "floorf",
+ 64 => "floor",
+ 80 => "__floorx",
+ 128 => "floorq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ },
+ .div_exact, .div_exact_optimized => {},
+ }
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__divhf3",
+ 32 => "__divsf3",
+ 64 => "__divdf3",
+ 80 => "__divxf3",
+ 128 => "__divtf3",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .rem => |air_tag| {
+ // Remainder (truncated, toward zero). Integers are lowered inline as
+ // divide + multiply-subtract; floats become a call to the fmod family.
+ // Nothing is generated when no later instruction consumes the result.
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const div_ra = try isel.allocIntReg();
+ defer isel.freeReg(div_ra);
+ // rem = lhs - (lhs / rhs) * rhs. Instructions are emitted in reverse
+ // program order (the msub consumes div_ra, which the following
+ // sdiv/udiv defines), so at run time the divide executes first.
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...32 => {
+ try isel.emit(.msub(res_ra.w(), div_ra.w(), rhs_mat.ra.w(), lhs_mat.ra.w()));
+ try isel.emit(switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ .unsigned => .udiv(div_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ });
+ },
+ 33...64 => {
+ try isel.emit(.msub(res_ra.x(), div_ra.x(), rhs_mat.ra.x(), lhs_mat.ra.x()));
+ try isel.emit(switch (int_info.signedness) {
+ .signed => .sdiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ .unsigned => .udiv(div_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ });
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ } else {
+ // Float remainder via libcall. f16/f32/f64/f128 values travel in
+ // vector registers v0/v1; f80 is split into a low 64-bit half and
+ // a high 16-bit half carried in integer registers (r0/r1 for the
+ // result, r0..r3 for the two arguments).
+ const bits = ty.floatBits(isel.target);
+
+ // The call sequence is built return-first, consistent with the
+ // backend's reverse emission order.
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ // The branch target is patched later via a global relocation.
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__fmodh",
+ 32 => "fmodf",
+ 64 => "fmod",
+ 80 => "__fmodx",
+ 128 => "fmodq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ptr_add, .ptr_sub => |air_tag| {
+ // Pointer arithmetic: step a base pointer by index * element size.
+ // Skipped entirely when no later instruction consumes the result.
+ if (isel.live_values.fetchRemove(air.inst_index)) |result| no_use: {
+ defer result.value.deref(isel);
+ const dst_ra = try result.value.defReg(isel) orelse break :no_use;
+
+ const payload = air.data(air.inst_index).ty_pl;
+ const operands = isel.air.extraData(Air.Bin, payload.payload).data;
+ const ptr_ty = payload.ty.toType();
+ // Bytes occupied by one pointee element.
+ const stride = ptr_ty.elemType2(zcu).abiSize(zcu);
+
+ const ptr_vi = try isel.use(operands.lhs);
+ // The address itself lives in the low 8 bytes of the pointer value.
+ var ptr_word_it = ptr_vi.field(ptr_ty, 0, 8);
+ const ptr_word_vi = try ptr_word_it.only(isel);
+ const ptr_word_mat = try ptr_word_vi.?.matReg(isel);
+ const idx_vi = try isel.use(operands.rhs);
+ try isel.elemPtr(dst_ra, ptr_word_mat.ra, switch (air_tag) {
+ else => unreachable,
+ .ptr_add => .add,
+ .ptr_sub => .sub,
+ }, stride, idx_vi);
+ try ptr_word_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .max, .min => |air_tag| {
+ // @max/@min. Integers use a compare plus conditional select; f16/f32/f64
+ // use the fmaxnm/fminnm instructions (widening f16 through f32 when the
+ // CPU lacks fullfp16); f80/f128 fall back to a libcall.
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // Condition under which lhs is selected, per op and signedness.
+ const cond: codegen.aarch64.encoding.ConditionCode = switch (air_tag) {
+ else => unreachable,
+ .max => switch (int_info.signedness) {
+ .signed => .ge,
+ .unsigned => .hs,
+ },
+ .min => switch (int_info.signedness) {
+ .signed => .lt,
+ .unsigned => .lo,
+ },
+ };
+ // Emitted in reverse program order: at run time the subs (a compare
+ // discarding its difference into the zero register) executes first,
+ // then the csel picks lhs or rhs.
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...32 => {
+ try isel.emit(.csel(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w(), cond));
+ try isel.emit(.subs(.wzr, lhs_mat.ra.w(), .{ .register = rhs_mat.ra.w() }));
+ },
+ 33...64 => {
+ try isel.emit(.csel(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x(), cond));
+ try isel.emit(.subs(.xzr, lhs_mat.ra.x(), .{ .register = rhs_mat.ra.x() }));
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ // f16 needs a round-trip through f32 unless the CPU supports
+ // native half-precision arithmetic.
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ // Reverse emission: this narrowing of the f32 result back to
+ // f16 executes last.
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // Scratch vector registers hold the widened operands when
+ // converting; otherwise operate on the materialized registers.
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ // `continue :bits 32` re-dispatches the f16-without-fullfp16
+ // case to the f32 prong of this labeled switch.
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else switch (air_tag) {
+ else => unreachable,
+ .max => .fmaxnm(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ .min => .fminnm(res_ra.h(), lhs_ra.h(), rhs_ra.h()),
+ },
+ 32 => switch (air_tag) {
+ else => unreachable,
+ .max => .fmaxnm(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ .min => .fminnm(res_ra.s(), lhs_ra.s(), rhs_ra.s()),
+ },
+ 64 => switch (air_tag) {
+ else => unreachable,
+ .max => .fmaxnm(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ .min => .fminnm(res_ra.d(), lhs_ra.d(), rhs_ra.d()),
+ },
+ });
+ // Executed first at run time: widen both f16 operands to f32.
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ // Libcall path. Only 80 and 128 occur here; the 16/32/64
+ // prongs in the ABI switches below are shared boilerplate.
+ // f80 travels in integer register pairs, f128 in v0/v1.
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (air_tag) {
+ else => unreachable,
+ .max => switch (bits) {
+ else => unreachable,
+ 16 => "__fmaxh",
+ 32 => "fmaxf",
+ 64 => "fmax",
+ 80 => "__fmaxx",
+ 128 => "fmaxq",
+ },
+ .min => switch (bits) {
+ else => unreachable,
+ 16 => "__fminh",
+ 32 => "fminf",
+ 64 => "fmin",
+ 80 => "__fminx",
+ 128 => "fminq",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .add_with_overflow, .sub_with_overflow => |air_tag| {
+ // Wrapping add/sub that also materializes an overflow flag. The AIR
+ // result is laid out as { wrapped operand, overflow bit }.
+ if (isel.live_values.fetchRemove(air.inst_index)) |result| {
+ defer result.value.deref(isel);
+
+ const payload = air.data(air.inst_index).ty_pl;
+ const operands = isel.air.extraData(Air.Bin, payload.payload).data;
+ const operand_ty = isel.air.typeOf(operands.lhs, ip);
+ const lhs = try isel.use(operands.lhs);
+ const rhs = try isel.use(operands.rhs);
+ const operand_size = lhs.size(isel);
+ // The 1-byte overflow bit sits directly after the wrapped result.
+ var ovf_it = result.value.field(payload.ty.toType(), operand_size, 1);
+ const ovf_vi = try ovf_it.only(isel);
+ var wrapped_it = result.value.field(payload.ty.toType(), 0, operand_size);
+ const wrapped_vi = try wrapped_it.only(isel);
+ // Discard the overflow bit into the zero register when nothing reads it.
+ const ovf_ra = try ovf_vi.?.defReg(isel) orelse .zr;
+ try wrapped_vi.?.addOrSubtract(isel, operand_ty, lhs, switch (air_tag) {
+ else => unreachable,
+ .add_with_overflow => .add,
+ .sub_with_overflow => .sub,
+ }, rhs, .{ .wrap = true, .overflow_ra = ovf_ra });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .alloc, .ret_ptr => |air_tag| {
+ // Reserve a stack slot and materialize its address. For ret_ptr, when
+ // the function's return value already lives at an address (provided by
+ // the caller), no slot is created here.
+ if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| unused: {
+ defer ptr_vi.value.deref(isel);
+ switch (air_tag) {
+ else => unreachable,
+ .alloc => {},
+ .ret_ptr => if (isel.live_values.get(Block.main)) |ret_vi| switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => break :unused,
+ },
+ }
+ const ptr_ra = try ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty = air.data(air.inst_index).ty;
+ const slot_size = ty.childType(zcu).abiSize(zcu);
+ const slot_align = ty.ptrAlignment(zcu);
+ // Bump-allocate the slot at the next suitably aligned frame offset.
+ const slot_offset = slot_align.forward(isel.stack_size);
+ isel.stack_size = @intCast(slot_offset + slot_size);
+ // add-immediate encodes only 12 bits (optionally shifted left 12), so
+ // offsets >= 4096 take two adds. Instructions are emitted in reverse
+ // program order: at run time the low-part add off sp executes first,
+ // then the shifted high-part add on top of it.
+ const lo12: u12 = @truncate(slot_offset >> 0);
+ const hi12: u12 = @intCast(slot_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ ptr_ra.x(),
+ if (lo12 > 0) ptr_ra.x() else .sp,
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0 or hi12 == 0) try isel.emit(.add(ptr_ra.x(), .sp, .{ .immediate = lo12 }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .assembly => {
+ // Inline assembly. Parses output/input constraints and clobbers from
+ // the AIR extra data, assembles the source text, and binds operand
+ // values to the constrained registers.
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.Asm, ty_pl.payload);
+ var extra_index = extra.end;
+ const outputs: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra_index..][0..extra.data.flags.outputs_len]);
+ extra_index += outputs.len;
+ const inputs: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra_index..][0..extra.data.inputs_len]);
+ extra_index += inputs.len;
+
+ var as: codegen.aarch64.Assemble = .{
+ .source = undefined,
+ .operands = .empty,
+ };
+ defer as.operands.deinit(gpa);
+
+ for (outputs) |output| {
+ const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
+ const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_index += (constraint.len + name.len + (2 + 3)) / 4;
+
+ // Only the `.none` output (the asm expression's own result) is
+ // supported, with either a fixed-register ("={reg}") or
+ // any-register ("=r") constraint.
+ switch (output) {
+ else => return isel.fail("invalid constraint: '{s}'", .{constraint}),
+ .none => if (std.mem.startsWith(u8, constraint, "={") and std.mem.endsWith(u8, constraint, "}")) {
+ const output_reg = Register.parse(constraint["={".len .. constraint.len - "}".len]) orelse
+ return isel.fail("invalid constraint: '{s}'", .{constraint});
+ const output_ra = output_reg.alias;
+ if (isel.live_values.fetchRemove(air.inst_index)) |output_vi| {
+ defer output_vi.value.deref(isel);
+ try output_vi.value.defLiveIn(isel, output_reg.alias, comptime &.initFill(.free));
+ isel.freeReg(output_ra);
+ }
+ // "_" means the template never refers to this operand by name.
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate output name: '{s}'", .{name});
+ operand_gop.value_ptr.* = .{ .register = switch (ty_pl.ty.toType().abiSize(zcu)) {
+ 0 => unreachable,
+ 1...4 => output_ra.w(),
+ 5...8 => output_ra.x(),
+ else => return isel.fail("too big output type: '{f}'", .{isel.fmtType(ty_pl.ty.toType())}),
+ } };
+ }
+ } else if (std.mem.eql(u8, constraint, "=r")) {
+ const output_ra = if (isel.live_values.fetchRemove(air.inst_index)) |output_vi| output_ra: {
+ defer output_vi.value.deref(isel);
+ break :output_ra try output_vi.value.defReg(isel) orelse try isel.allocIntReg();
+ } else try isel.allocIntReg();
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate output name: '{s}'", .{name});
+ operand_gop.value_ptr.* = .{ .register = switch (ty_pl.ty.toType().abiSize(zcu)) {
+ 0 => unreachable,
+ 1...4 => output_ra.w(),
+ 5...8 => output_ra.x(),
+ else => return isel.fail("too big output type: '{f}'", .{isel.fmtType(ty_pl.ty.toType())}),
+ } };
+ }
+ } else return isel.fail("invalid constraint: '{s}'", .{constraint}),
+ }
+ }
+
+ const input_mats = try gpa.alloc(Value.Materialize, inputs.len);
+ defer gpa.free(input_mats);
+ // Remember where the input constraints start; they are re-walked
+ // after the asm body has been emitted to finalize each input.
+ const inputs_extra_index = extra_index;
+ for (inputs, input_mats) |input, *input_mat| {
+ const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
+ const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_index += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) {
+ const input_reg = Register.parse(constraint["{".len .. constraint.len - "}".len]) orelse
+ return isel.fail("invalid constraint: '{s}'", .{constraint});
+ input_mat.* = .{ .vi = try isel.use(input), .ra = input_reg.alias };
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate input name: '{s}'", .{name});
+ const input_ty = isel.air.typeOf(input, ip);
+ operand_gop.value_ptr.* = .{ .register = switch (input_ty.abiSize(zcu)) {
+ 0 => unreachable,
+ 1...4 => input_reg.alias.w(),
+ 5...8 => input_reg.alias.x(),
+ else => return isel.fail("too big input type: '{f}'", .{
+ isel.fmtType(isel.air.typeOf(input, ip)),
+ }),
+ } };
+ }
+ } else if (std.mem.eql(u8, constraint, "r")) {
+ const input_vi = try isel.use(input);
+ input_mat.* = try input_vi.matReg(isel);
+ if (!std.mem.eql(u8, name, "_")) {
+ const operand_gop = try as.operands.getOrPut(gpa, name);
+ if (operand_gop.found_existing) return isel.fail("duplicate input name: '{s}'", .{name});
+ operand_gop.value_ptr.* = .{ .register = switch (input_vi.size(isel)) {
+ 0 => unreachable,
+ 1...4 => input_mat.ra.w(),
+ 5...8 => input_mat.ra.x(),
+ else => return isel.fail("too big input type: '{f}'", .{
+ isel.fmtType(isel.air.typeOf(input, ip)),
+ }),
+ } };
+ }
+ } else if (std.mem.eql(u8, name, "_")) {
+ input_mat.vi = try isel.use(input);
+ } else return isel.fail("invalid constraint: '{s}'", .{constraint});
+ }
+
+ // First clobber pass: reserve each named register, rejecting
+ // duplicates. "memory" and the flags ("nzcv") need no register.
+ const clobbers = ip.indexToKey(extra.data.clobbers).aggregate;
+ const clobbers_ty: ZigType = .fromInterned(clobbers.ty);
+ for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
+ switch (switch (clobbers.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ }) {
+ else => unreachable,
+ .bool_false => continue,
+ .bool_true => {},
+ }
+ const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
+ if (std.mem.eql(u8, clobber_name, "memory")) continue;
+ if (std.mem.eql(u8, clobber_name, "nzcv")) continue;
+ const clobber_reg = Register.parse(clobber_name) orelse
+ return isel.fail("unable to parse clobber: '{s}'", .{clobber_name});
+ const live_vi = isel.live_registers.getPtr(clobber_reg.alias);
+ switch (live_vi.*) {
+ _ => {},
+ .allocating => return isel.fail("clobbered twice: '{s}'", .{clobber_name}),
+ .free => live_vi.* = .allocating,
+ }
+ }
+ // Second clobber pass: spill any live value still occupying a
+ // clobbered register, then mark the register as allocating.
+ for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
+ switch (switch (clobbers.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ }) {
+ else => unreachable,
+ .bool_false => continue,
+ .bool_true => {},
+ }
+ const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
+ if (std.mem.eql(u8, clobber_name, "memory")) continue;
+ if (std.mem.eql(u8, clobber_name, "nzcv")) continue;
+ const clobber_ra = Register.parse(clobber_name).?.alias;
+ const live_vi = isel.live_registers.getPtr(clobber_ra);
+ switch (live_vi.*) {
+ _ => {
+ if (!try isel.fill(clobber_ra))
+ return isel.fail("unable to clobber: '{s}'", .{clobber_name});
+ assert(live_vi.* == .free);
+ live_vi.* = .allocating;
+ },
+ .allocating => {},
+ .free => unreachable,
+ }
+ }
+
+ // The remaining extra data is the asm source itself.
+ as.source = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..])[0..extra.data.source_len :0];
+ const asm_start = isel.instructions.items.len;
+ while (as.nextInstruction() catch |err| switch (err) {
+ error.InvalidSyntax => {
+ const remaining_source = std.mem.span(as.source);
+ return isel.fail("unable to assemble: '{s}'", .{std.mem.trim(
+ u8,
+ as.source[0 .. std.mem.indexOfScalar(u8, remaining_source, '\n') orelse remaining_source.len],
+ &std.ascii.whitespace,
+ )});
+ },
+ }) |instruction| try isel.emit(instruction);
+ // The assembler produced forward program order, but this backend
+ // emits in reverse; flip the span just emitted so it matches.
+ std.mem.reverse(codegen.aarch64.encoding.Instruction, isel.instructions.items[asm_start..]);
+
+ // Re-walk the input constraints to finalize each materialization.
+ extra_index = inputs_extra_index;
+ for (input_mats) |input_mat| {
+ const extra_bytes = std.mem.sliceAsBytes(isel.air.extra.items[extra_index..]);
+ const constraint = std.mem.sliceTo(extra_bytes, 0);
+ const name = std.mem.sliceTo(extra_bytes[constraint.len + 1 ..], 0);
+ // This equation accounts for the fact that even if we have exactly 4 bytes
+ // for the string, we still use the next u32 for the null terminator.
+ extra_index += (constraint.len + name.len + (2 + 3)) / 4;
+
+ if (std.mem.startsWith(u8, constraint, "{") and std.mem.endsWith(u8, constraint, "}")) {
+ try input_mat.vi.liveOut(isel, input_mat.ra);
+ } else if (std.mem.eql(u8, constraint, "r")) {
+ try input_mat.finish(isel);
+ } else if (std.mem.eql(u8, name, "_")) {
+ try input_mat.vi.mat(isel);
+ } else unreachable;
+ }
+
+ // Release every register the two passes above reserved. This must
+ // skip exactly the pseudo-clobbers that were never reserved
+ // ("memory" and "nzcv"); anything else parsed successfully there,
+ // so the unconditional unwrap below cannot fail.
+ for (0..clobbers_ty.structFieldCount(zcu)) |field_index| {
+ switch (switch (clobbers.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ }) {
+ else => unreachable,
+ .bool_false => continue,
+ .bool_true => {},
+ }
+ const clobber_name = clobbers_ty.structFieldName(field_index, zcu).toSlice(ip).?;
+ if (std.mem.eql(u8, clobber_name, "memory")) continue;
+ if (std.mem.eql(u8, clobber_name, "nzcv")) continue;
+ isel.freeReg(Register.parse(clobber_name).?.alias);
+ }
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .bit_and, .bit_or, .xor, .bool_and, .bool_or => |air_tag| {
+ // Bitwise and/or/xor; bool and/or share the same lowering as 1-bit
+ // unsigned integers. Values up to 128 bits are processed in 8-byte
+ // parts, iterating from the high end of the value downward.
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ const int_info: std.builtin.Type.Int = if (ty.toIntern() == .bool_type)
+ .{ .signedness = .unsigned, .bits = 1 }
+ else if (ty.isAbiInt(zcu))
+ ty.intInfo(zcu)
+ else
+ return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ var offset = res_vi.value.size(isel);
+ while (offset > 0) {
+ const size = @min(offset, 8);
+ offset -= size;
+ var res_part_it = res_vi.value.field(ty, offset, size);
+ const res_part_vi = try res_part_it.only(isel);
+ // Parts whose result register is never read are skipped entirely.
+ const res_part_ra = try res_part_vi.?.defReg(isel) orelse continue;
+ var lhs_part_it = lhs_vi.field(ty, offset, size);
+ const lhs_part_vi = try lhs_part_it.only(isel);
+ const lhs_part_mat = try lhs_part_vi.?.matReg(isel);
+ var rhs_part_it = rhs_vi.field(ty, offset, size);
+ const rhs_part_vi = try rhs_part_it.only(isel);
+ const rhs_part_mat = try rhs_part_vi.?.matReg(isel);
+ try isel.emit(switch (air_tag) {
+ else => unreachable,
+ .bit_and, .bool_and => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .@"and"(res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ 8 => .@"and"(res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ .bit_or, .bool_or => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .orr(res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ 8 => .orr(res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ .xor => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .eor(res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ 8 => .eor(res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ });
+ try rhs_part_mat.finish(isel);
+ try lhs_part_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .shr, .shr_exact, .shl, .shl_exact => |air_tag| {
+ // Integer shifts. Reminder: this backend emits instructions in reverse
+ // program order, so within each case the emitted sequence reads
+ // bottom-up as the run-time execution order.
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ // Plain shl must re-extend the result to the type's bit width
+ // (sbfm/ubfm); shr preserves a correctly extended operand, and
+ // the *_exact variants may assume no bits are shifted out.
+ // Exact register widths (32/64) need no fixup. Emitted first
+ // here means this extension executes *after* the shift below.
+ switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact, .shl_exact => {},
+ .shl => switch (bits) {
+ else => unreachable,
+ 1...31 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 32 => {},
+ 33...63 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 64 => {},
+ },
+ }
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ // The variable-shift instructions: asrv for signed right shifts,
+ // lsrv for unsigned, lslv for left shifts.
+ try isel.emit(switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => switch (bits) {
+ else => unreachable,
+ 1...32 => switch (int_info.signedness) {
+ .signed => .asrv(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ .unsigned => .lsrv(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ },
+ 33...64 => switch (int_info.signedness) {
+ .signed => .asrv(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ .unsigned => .lsrv(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ },
+ },
+ .shl, .shl_exact => switch (bits) {
+ else => unreachable,
+ 1...32 => .lslv(res_ra.w(), lhs_mat.ra.w(), rhs_mat.ra.w()),
+ 33...64 => .lslv(res_ra.x(), lhs_mat.ra.x(), rhs_mat.ra.x()),
+ },
+ });
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ // Wide shifts operate on the value as lo/hi 64-bit halves, with
+ // amounts >= 64 resolved by conditional selects on the result of
+ // a flag-setting test of the amount's bit 6 (see the ands below).
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ const res_hi64_ra = try res_hi64_vi.?.defReg(isel);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ const res_lo64_ra = try res_lo64_vi.?.defReg(isel);
+ if (res_hi64_ra == null and res_lo64_ra == null) break :unused;
+ // As in the <=64-bit case: plain shl re-extends the used bits of
+ // the high half afterward (emitted first = executes last).
+ if (res_hi64_ra) |res_ra| switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact, .shl_exact => {},
+ .shl => switch (bits) {
+ else => unreachable,
+ 65...127 => try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 64 - 1),
+ }),
+ }),
+ 128 => {},
+ },
+ };
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ // While materializing operands and scratch registers, temporarily
+ // lock result registers that must remain distinct from them.
+ const lhs_hi64_mat = lhs_hi64_mat: {
+ const res_lock: RegLock = switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => switch (int_info.signedness) {
+ .signed => if (res_lo64_ra) |res_ra| isel.lockReg(res_ra) else .empty,
+ .unsigned => .empty,
+ },
+ .shl, .shl_exact => .empty,
+ };
+ defer res_lock.unlock(isel);
+ var lhs_hi64_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi64_vi = try lhs_hi64_it.only(isel);
+ break :lhs_hi64_mat try lhs_hi64_vi.?.matReg(isel);
+ };
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ const lhs_lo64_mat = try lhs_lo64_vi.?.matReg(isel);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lo64_ra = lo64_ra: {
+ const res_lock: RegLock = switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => switch (int_info.signedness) {
+ .signed => if (res_lo64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty,
+ .unsigned => .empty,
+ },
+ .shl, .shl_exact => if (res_hi64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty,
+ };
+ defer res_lock.unlock(isel);
+ break :lo64_ra try isel.allocIntReg();
+ };
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra = hi64_ra: {
+ const res_lock: RegLock = switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => if (res_lo64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty,
+ .shl, .shl_exact => .empty,
+ };
+ defer res_lock.unlock(isel);
+ break :hi64_ra try isel.allocIntReg();
+ };
+ defer isel.freeReg(hi64_ra);
+ // Final selection (executes last): .eq means amount < 64, so each
+ // half picks either its in-lane result or the cross-lane one.
+ switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => {
+ if (res_hi64_ra) |res_ra| switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.csel(res_ra.x(), hi64_ra.x(), lo64_ra.x(), .eq));
+ // For amounts >= 64 the signed high half is pure sign
+ // fill: smear the sign bit of the used hi bits.
+ try isel.emit(.sbfm(lo64_ra.x(), lhs_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(bits - 64 - 1),
+ .imms = @intCast(bits - 64 - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.csel(res_ra.x(), hi64_ra.x(), .xzr, .eq)),
+ };
+ if (res_lo64_ra) |res_ra| try isel.emit(.csel(res_ra.x(), lo64_ra.x(), hi64_ra.x(), .eq));
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.asrv(hi64_ra.x(), lhs_hi64_mat.ra.x(), rhs_mat.ra.x())),
+ .unsigned => try isel.emit(.lsrv(hi64_ra.x(), lhs_hi64_mat.ra.x(), rhs_mat.ra.x())),
+ }
+ },
+ .shl, .shl_exact => {
+ if (res_lo64_ra) |res_ra| try isel.emit(.csel(res_ra.x(), lo64_ra.x(), .xzr, .eq));
+ if (res_hi64_ra) |res_ra| try isel.emit(.csel(res_ra.x(), hi64_ra.x(), lo64_ra.x(), .eq));
+ try isel.emit(.lslv(lo64_ra.x(), lhs_lo64_mat.ra.x(), rhs_mat.ra.x()));
+ },
+ }
+ // tst amount, #0x40 (the bitmask immediate N=word, immr=26,
+ // imms=0 decodes to a single bit rotated into position 6), i.e.
+ // set flags on whether the shift amount is >= 64.
+ try isel.emit(.ands(.wzr, rhs_mat.ra.w(), .{ .immediate = .{ .N = .word, .immr = 32 - 6, .imms = 0 } }));
+ // Cross-lane composite (executes first). For shr, run-time order
+ // is: hi64 = ~amt; lo64 = lhs.lo >> amt; hi64 = lhs.hi << (63 -
+ // (amt & 63)); lo64 |= hi64 << 1 — the extra lsl #1 completes the
+ // << (64 - amt) without an out-of-range shift when amt == 0.
+ // The shl case mirrors this with lsr.
+ switch (air_tag) {
+ else => unreachable,
+ .shr, .shr_exact => if (res_lo64_ra) |_| {
+ try isel.emit(.orr(
+ lo64_ra.x(),
+ lo64_ra.x(),
+ .{ .shifted_register = .{ .register = hi64_ra.x(), .shift = .{ .lsl = 1 } } },
+ ));
+ try isel.emit(.lslv(hi64_ra.x(), lhs_hi64_mat.ra.x(), hi64_ra.x()));
+ try isel.emit(.lsrv(lo64_ra.x(), lhs_lo64_mat.ra.x(), rhs_mat.ra.x()));
+ try isel.emit(.orn(hi64_ra.w(), .wzr, .{ .register = rhs_mat.ra.w() }));
+ },
+ .shl, .shl_exact => if (res_hi64_ra) |_| {
+ try isel.emit(.orr(
+ hi64_ra.x(),
+ hi64_ra.x(),
+ .{ .shifted_register = .{ .register = lo64_ra.x(), .shift = .{ .lsr = 1 } } },
+ ));
+ try isel.emit(.lsrv(lo64_ra.x(), lhs_lo64_mat.ra.x(), lo64_ra.x()));
+ try isel.emit(.lslv(hi64_ra.x(), lhs_hi64_mat.ra.x(), rhs_mat.ra.x()));
+ try isel.emit(.orn(lo64_ra.w(), .wzr, .{ .register = rhs_mat.ra.w() }));
+ },
+ }
+ try rhs_mat.finish(isel);
+ try lhs_lo64_mat.finish(isel);
+ try lhs_hi64_mat.finish(isel);
+ break :unused;
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .not => |air_tag| {
+ // Bitwise/boolean NOT, processed in 8-byte parts from the high end
+ // down. Signed values (stored sign-extended) take a full-width NOT;
+ // unsigned values flip only the type's significant bits so the high
+ // padding bits stay zero.
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ const int_info: std.builtin.Type.Int = int_info: {
+ if (ty_op.ty == .bool_type) break :int_info .{ .signedness = .unsigned, .bits = 1 };
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ break :int_info ty.intInfo(zcu);
+ };
+ if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const src_vi = try isel.use(ty_op.operand);
+ var offset = res_vi.value.size(isel);
+ while (offset > 0) {
+ const size = @min(offset, 8);
+ offset -= size;
+ var res_part_it = res_vi.value.field(ty, offset, size);
+ const res_part_vi = try res_part_it.only(isel);
+ // Parts whose result register is never read are skipped entirely.
+ const res_part_ra = try res_part_vi.?.defReg(isel) orelse continue;
+ var src_part_it = src_vi.field(ty, offset, size);
+ const src_part_vi = try src_part_it.only(isel);
+ const src_part_mat = try src_part_vi.?.matReg(isel);
+ try isel.emit(switch (int_info.signedness) {
+ // orn with the zero register is mvn: a full bitwise NOT.
+ .signed => switch (size) {
+ else => unreachable,
+ 1, 2, 4 => .orn(res_part_ra.w(), .wzr, .{ .register = src_part_mat.ra.w() }),
+ 8 => .orn(res_part_ra.x(), .xzr, .{ .register = src_part_mat.ra.x() }),
+ },
+ // The switch operand is the number of significant bits left in
+ // this part; eor with a low-bits mask flips exactly those.
+ .unsigned => switch (@min(int_info.bits - 8 * offset, 64)) {
+ else => unreachable,
+ 1...31 => |bits| .eor(res_part_ra.w(), src_part_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ } }),
+ 32 => .orn(res_part_ra.w(), .wzr, .{ .register = src_part_mat.ra.w() }),
+ 33...63 => |bits| .eor(res_part_ra.x(), src_part_mat.ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ } }),
+ 64 => .orn(res_part_ra.x(), .xzr, .{ .register = src_part_mat.ra.x() }),
+ },
+ });
+ try src_part_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .bitcast => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_tag = dst_ty.zigTypeTag(zcu);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_tag = src_ty.zigTypeTag(zcu);
+ if (dst_ty.isAbiInt(zcu) and (src_tag == .bool or src_ty.isAbiInt(zcu))) {
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_int_info: std.builtin.Type.Int = if (src_tag == .bool) .{ .signedness = undefined, .bits = 1 } else src_ty.intInfo(zcu);
+ assert(dst_int_info.bits == src_int_info.bits);
+ if (dst_tag != .@"struct" and src_tag != .@"struct" and src_tag != .bool and dst_int_info.signedness == src_int_info.signedness) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else switch (dst_int_info.bits) {
+ 0 => unreachable,
+ 1...31 => |dst_bits| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ });
+ try src_mat.finish(isel);
+ },
+ 32 => try dst_vi.value.move(isel, ty_op.operand),
+ 33...63 => |dst_bits| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 1),
+ }),
+ });
+ try src_mat.finish(isel);
+ },
+ 64 => try dst_vi.value.move(isel, ty_op.operand),
+ 65...127 => |dst_bits| {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ if (try dst_hi64_vi.?.defReg(isel)) |dst_hi64_ra| {
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ });
+ try src_hi64_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ 128 => try dst_vi.value.move(isel, ty_op.operand),
+ else => return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ } else if ((dst_ty.isPtrAtRuntime(zcu) or dst_ty.isAbiInt(zcu)) and (src_ty.isPtrAtRuntime(zcu) or src_ty.isAbiInt(zcu))) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_ty.isSliceAtRuntime(zcu) and src_ty.isSliceAtRuntime(zcu)) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_tag == .error_union and src_tag == .error_union) {
+ assert(dst_ty.errorUnionSet(zcu).hasRuntimeBitsIgnoreComptime(zcu) ==
+ src_ty.errorUnionSet(zcu).hasRuntimeBitsIgnoreComptime(zcu));
+ if (dst_ty.errorUnionPayload(zcu).toIntern() == src_ty.errorUnionPayload(zcu).toIntern()) {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ } else if (dst_tag == .float and src_tag == .float) {
+ assert(dst_ty.floatBits(isel.target) == src_ty.floatBits(isel.target));
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_ty.isAbiInt(zcu) and src_tag == .float) {
+ const dst_int_info = dst_ty.intInfo(zcu);
+ assert(dst_int_info.bits == src_ty.floatBits(isel.target));
+ switch (dst_int_info.bits) {
+ else => unreachable,
+ 16 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ switch (dst_int_info.signedness) {
+ .signed => try isel.emit(.smov(dst_ra.w(), src_mat.ra.@"h[]"(0))),
+ .unsigned => try isel.emit(if (isel.target.cpu.has(.aarch64, .fullfp16))
+ .fmov(dst_ra.w(), .{ .register = src_mat.ra.h() })
+ else
+ .umov(dst_ra.w(), src_mat.ra.@"h[]"(0))),
+ }
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.w(), .{ .register = src_mat.ra.s() }));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.x(), .{ .register = src_mat.ra.d() }));
+ try src_mat.finish(isel);
+ },
+ 80 => switch (dst_int_info.signedness) {
+ .signed => {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ if (try dst_hi16_vi.?.defReg(isel)) |dst_hi16_ra| {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.sbfm(dst_hi16_ra.x(), src_hi16_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = 16 - 1,
+ }));
+ try src_hi16_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ else => try dst_vi.value.move(isel, ty_op.operand),
+ },
+ 128 => {
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ if (try dst_hi64_vi.?.defReg(isel)) |dst_hi64_ra| try isel.emit(.fmov(dst_hi64_ra.x(), .{ .register = src_mat.ra.@"d[]"(1) }));
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| try isel.emit(.fmov(dst_lo64_ra.x(), .{ .register = src_mat.ra.d() }));
+ try src_mat.finish(isel);
+ },
+ }
+ } else if (dst_tag == .float and src_ty.isAbiInt(zcu)) {
+ const src_int_info = src_ty.intInfo(zcu);
+ assert(dst_ty.floatBits(isel.target) == src_int_info.bits);
+ switch (src_int_info.bits) {
+ else => unreachable,
+ 16 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(
+ if (isel.target.cpu.has(.aarch64, .fullfp16)) dst_ra.h() else dst_ra.s(),
+ .{ .register = src_mat.ra.w() },
+ ));
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.s(), .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fmov(dst_ra.d(), .{ .register = src_mat.ra.x() }));
+ try src_mat.finish(isel);
+ },
+ 80 => switch (src_int_info.signedness) {
+ .signed => {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ if (try dst_hi16_vi.?.defReg(isel)) |dst_hi16_ra| {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.ubfm(dst_hi16_ra.x(), src_hi16_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = 16 - 1,
+ }));
+ try src_hi16_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ else => try dst_vi.value.move(isel, ty_op.operand),
+ },
+ 128 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ try isel.emit(.fmov(dst_ra.@"d[]"(1), .{ .register = src_hi64_mat.ra.x() }));
+ try src_hi64_mat.finish(isel);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ try isel.emit(.fmov(dst_ra.d(), .{ .register = src_lo64_mat.ra.x() }));
+ try src_lo64_mat.finish(isel);
+ },
+ }
+ } else if (dst_ty.isAbiInt(zcu) and src_tag == .array and src_ty.childType(zcu).isAbiInt(zcu)) {
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_child_int_info = src_ty.childType(zcu).intInfo(zcu);
+ const src_len = src_ty.arrayLenIncludingSentinel(zcu);
+ assert(dst_int_info.bits == src_child_int_info.bits * src_len);
+ const src_child_size = src_ty.childType(zcu).abiSize(zcu);
+ if (8 * src_child_size == src_child_int_info.bits) {
+ try dst_vi.value.defAddr(isel, dst_ty, dst_int_info, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ try isel.movImmediate(.x2, src_child_size * src_len);
+ try call.paramAddress(isel, src_vi, .r1);
+ try call.paramAddress(isel, dst_vi.value, .r0);
+ try call.finishParams(isel);
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ } else if (dst_tag == .array and dst_ty.childType(zcu).isAbiInt(zcu) and src_ty.isAbiInt(zcu)) {
+ const dst_child_int_info = dst_ty.childType(zcu).intInfo(zcu);
+ const src_int_info = src_ty.intInfo(zcu);
+ const dst_len = dst_ty.arrayLenIncludingSentinel(zcu);
+ assert(dst_child_int_info.bits * dst_len == src_int_info.bits);
+ const dst_child_size = dst_ty.childType(zcu).abiSize(zcu);
+ if (8 * dst_child_size == dst_child_int_info.bits) {
+ try dst_vi.value.defAddr(isel, dst_ty, null, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ try isel.movImmediate(.x2, dst_child_size * dst_len);
+ try call.paramAddress(isel, src_vi, .r1);
+ try call.paramAddress(isel, dst_vi.value, .r0);
+ try call.finishParams(isel);
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ } else return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .block => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.Block, ty_pl.payload);
+
+ if (ty_pl.ty != .noreturn_type) {
+ isel.blocks.putAssumeCapacityNoClobber(air.inst_index, .{
+ .live_registers = isel.live_registers,
+ .target_label = @intCast(isel.instructions.items.len),
+ });
+ }
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ if (ty_pl.ty != .noreturn_type) {
+ const block_entry = isel.blocks.pop().?;
+ assert(block_entry.key == air.inst_index);
+ if (isel.live_values.fetchRemove(air.inst_index)) |result_vi| result_vi.value.deref(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .loop => {
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.Block, ty_pl.payload);
+ const loops = isel.loops.values();
+ const loop_index = isel.loops.getIndex(air.inst_index).?;
+ const loop = &loops[loop_index];
+
+ tracking_log.debug("{f}", .{
+ isel.fmtDom(air.inst_index, loop.dom, @intCast(isel.blocks.count())),
+ });
+ tracking_log.debug("{f}", .{isel.fmtLoopLive(air.inst_index)});
+ assert(loop.depth == isel.blocks.count());
+
+ if (false) {
+ // loops are dumb...
+ for (isel.loop_live.list.items[loop.live..loops[loop_index + 1].live]) |live_inst| {
+ const live_vi = try isel.use(live_inst.toRef());
+ try live_vi.mat(isel);
+ }
+
+ // IT'S DOM TIME!!!
+ for (isel.blocks.values(), 0..) |*block, dom_index| {
+ if (@as(u1, @truncate(isel.dom.items[
+ loop.dom + dom_index / @bitSizeOf(DomInt)
+ ] >> @truncate(dom_index))) == 0) continue;
+ var live_reg_it = block.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
+ _ => |live_vi| try live_vi.mat(isel),
+ .allocating => unreachable,
+ .free => {},
+ };
+ }
+ }
+
+ loop.live_registers = isel.live_registers;
+ loop.repeat_list = Loop.empty_list;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.merge(&loop.live_registers, .{ .fill_extra = true });
+
+ var repeat_label = loop.repeat_list;
+ assert(repeat_label != Loop.empty_list);
+ while (repeat_label != Loop.empty_list) {
+ const instruction = &isel.instructions.items[repeat_label];
+ const next_repeat_label = instruction.*;
+ instruction.* = .b(-@as(i28, @intCast((isel.instructions.items.len - 1 - repeat_label) << 2)));
+ repeat_label = @bitCast(next_repeat_label);
+ }
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .repeat => {
+ const repeat = air.data(air.inst_index).repeat;
+ try isel.loops.getPtr(repeat.loop_inst).?.branch(isel);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .br => {
+ const br = air.data(air.inst_index).br;
+ const block = isel.blocks.getPtr(br.block_inst).?;
+ try block.branch(isel);
+ if (isel.live_values.get(br.block_inst)) |dst_vi| try dst_vi.move(isel, br.operand);
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .trap => {
+ try isel.emit(.brk(0x1));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .breakpoint => {
+ try isel.emit(.brk(0xf000));
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .call => {
+ const pl_op = air.data(air.inst_index).pl_op;
+ const extra = isel.air.extraData(Air.Call, pl_op.payload);
+ const args: []const Air.Inst.Ref = @ptrCast(isel.air.extra.items[extra.end..][0..extra.data.args_len]);
+
+ try call.prepareReturn(isel);
+ const maybe_def_ret_vi = isel.live_values.fetchRemove(air.inst_index);
+ var maybe_ret_addr_vi: ?Value.Index = null;
+ if (maybe_def_ret_vi) |def_ret_vi| {
+ defer def_ret_vi.value.deref(isel);
+
+ var ret_it: CallAbiIterator = .init;
+ const ret_vi = try ret_it.ret(isel, isel.air.typeOfIndex(air.inst_index, ip));
+ defer ret_vi.?.deref(isel);
+ switch (ret_vi.?.parent(isel)) {
+ .unallocated, .stack_slot => if (ret_vi.?.hint(isel)) |ret_ra| {
+ try call.returnLiveIn(isel, def_ret_vi.value, ret_ra);
+ } else {
+ var def_ret_part_it = def_ret_vi.value.parts(isel);
+ var ret_part_it = ret_vi.?.parts(isel);
+ while (def_ret_part_it.next()) |ret_part_vi| {
+ try call.returnLiveIn(isel, ret_part_vi, ret_part_it.next().?.hint(isel).?);
+ }
+ },
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ maybe_ret_addr_vi = address_vi;
+ _ = try def_ret_vi.value.defAddr(
+ isel,
+ isel.air.typeOfIndex(air.inst_index, ip),
+ null,
+ &call.caller_saved_regs,
+ );
+ },
+ }
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ if (pl_op.operand.toInterned()) |ct_callee| {
+ try isel.nav_relocs.append(gpa, switch (ip.indexToKey(ct_callee)) {
+ else => unreachable,
+ inline .@"extern", .func => |func| .{
+ .nav = func.owner_nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ },
+ .ptr => |ptr| .{
+ .nav = ptr.base_addr.nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ },
+ });
+ try isel.emit(.bl(0));
+ } else {
+ const callee_vi = try isel.use(pl_op.operand);
+ const callee_mat = try callee_vi.matReg(isel);
+ try isel.emit(.blr(callee_mat.ra.x()));
+ try callee_mat.finish(isel);
+ }
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ if (maybe_ret_addr_vi) |ret_addr_vi| try call.paramAddress(
+ isel,
+ maybe_def_ret_vi.?.value,
+ ret_addr_vi.hint(isel).?,
+ );
+ var param_it: CallAbiIterator = .init;
+ for (args) |arg| {
+ const param_vi = try param_it.param(isel, isel.air.typeOf(arg, ip)) orelse continue;
+ defer param_vi.deref(isel);
+ const arg_vi = try isel.use(arg);
+ const passed_vi = switch (param_vi.parent(isel)) {
+ .unallocated, .stack_slot => param_vi,
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ try call.paramAddress(isel, arg_vi, address_vi.hint(isel).?);
+ continue;
+ },
+ };
+ if (passed_vi.hint(isel)) |param_ra| {
+ try call.paramLiveOut(isel, arg_vi, param_ra);
+ } else {
+ var param_part_it = passed_vi.parts(isel);
+ var arg_part_it = arg_vi.parts(isel);
+ if (arg_part_it.only()) |_| {
+ try isel.values.ensureUnusedCapacity(isel.pt.zcu.gpa, param_part_it.remaining);
+ arg_vi.setParts(isel, param_part_it.remaining);
+ while (param_part_it.next()) |param_part_vi| _ = arg_vi.addPart(
+ isel,
+ param_part_vi.get(isel).offset_from_parent,
+ param_part_vi.size(isel),
+ );
+ param_part_it = passed_vi.parts(isel);
+ arg_part_it = arg_vi.parts(isel);
+ }
+ while (param_part_it.next()) |param_part_vi| {
+ const arg_part_vi = arg_part_it.next().?;
+ assert(arg_part_vi.get(isel).offset_from_parent ==
+ param_part_vi.get(isel).offset_from_parent);
+ assert(arg_part_vi.size(isel) == param_part_vi.size(isel));
+ try call.paramLiveOut(isel, arg_part_vi, param_part_vi.hint(isel).?);
+ }
+ }
+ }
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .clz => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.clzLimb(res_ra, int_info, src_mat.ra);
+ try src_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ const lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi64_ra);
+ try isel.emit(.csel(res_ra.w(), lo64_ra.w(), hi64_ra.w(), .eq));
+ try isel.emit(.add(lo64_ra.w(), lo64_ra.w(), .{ .immediate = @intCast(bits - 64) }));
+ try isel.emit(.subs(.xzr, src_hi64_mat.ra.x(), .{ .immediate = 0 }));
+ try isel.clzLimb(hi64_ra, .{ .signedness = int_info.signedness, .bits = bits - 64 }, src_hi64_mat.ra);
+ try isel.clzLimb(lo64_ra, .{ .signedness = .unsigned, .bits = 64 }, src_lo64_mat.ra);
+ try src_hi64_mat.finish(isel);
+ try src_lo64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .ctz => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ switch (int_info.bits) {
+ 0 => unreachable,
+ 1...64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.ctzLimb(res_ra, int_info, src_mat.ra);
+ try src_mat.finish(isel);
+ },
+ 65...128 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ const lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi64_ra);
+ try isel.emit(.csel(res_ra.w(), lo64_ra.w(), hi64_ra.w(), .ne));
+ try isel.emit(.add(hi64_ra.w(), hi64_ra.w(), .{ .immediate = 64 }));
+ try isel.emit(.subs(.xzr, src_lo64_mat.ra.x(), .{ .immediate = 0 }));
+ try isel.ctzLimb(hi64_ra, .{ .signedness = .unsigned, .bits = 64 }, src_hi64_mat.ra);
+ try isel.ctzLimb(lo64_ra, .{ .signedness = int_info.signedness, .bits = bits - 64 }, src_lo64_mat.ra);
+ try src_hi64_mat.finish(isel);
+ try src_lo64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .popcount => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const vec_ra = try isel.allocVecReg();
+ defer isel.freeReg(vec_ra);
+ try isel.emit(.umov(res_ra.w(), vec_ra.@"b[]"(0)));
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...8 => {},
+ 9...16 => try isel.emit(.addp(vec_ra.@"8b"(), vec_ra.@"8b"(), .{ .vector = vec_ra.@"8b"() })),
+ 17...64 => try isel.emit(.addv(vec_ra.b(), vec_ra.@"8b"())),
+ }
+ try isel.emit(.cnt(vec_ra.@"8b"(), vec_ra.@"8b"()));
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...31 => |bits| switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.fmov(vec_ra.s(), .{ .register = res_ra.w() }));
+ try isel.emit(.ubfm(res_ra.w(), src_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.fmov(vec_ra.s(), .{ .register = src_mat.ra.w() })),
+ },
+ 32 => try isel.emit(.fmov(vec_ra.s(), .{ .register = src_mat.ra.w() })),
+ 33...63 => |bits| switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.fmov(vec_ra.d(), .{ .register = res_ra.x() }));
+ try isel.emit(.ubfm(res_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.fmov(vec_ra.d(), .{ .register = src_mat.ra.x() })),
+ },
+ 64 => try isel.emit(.fmov(vec_ra.d(), .{ .register = src_mat.ra.x() })),
+ }
+ try src_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .byte_swap => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ if (int_info.bits == 8) break :unused try res_vi.value.move(isel, ty_op.operand);
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ switch (int_info.bits) {
+ else => unreachable,
+ 16 => switch (int_info.signedness) {
+ .signed => {
+ try isel.emit(.sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 32 - 16,
+ .imms = 32 - 1,
+ }));
+ try isel.emit(.rev(res_ra.w(), src_mat.ra.w()));
+ },
+ .unsigned => try isel.emit(.rev16(res_ra.w(), src_mat.ra.w())),
+ },
+ 24 => {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 32 - 24,
+ .imms = 32 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = 32 - 24,
+ .imms = 32 - 1,
+ })),
+ }
+ try isel.emit(.rev(res_ra.w(), src_mat.ra.w()));
+ },
+ 32 => try isel.emit(.rev(res_ra.w(), src_mat.ra.w())),
+ 40, 48, 56 => |bits| {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ }
+ try isel.emit(.rev(res_ra.x(), src_mat.ra.x()));
+ },
+ 64 => try isel.emit(.rev(res_ra.x(), src_mat.ra.x())),
+ }
+ try src_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .bit_reverse => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 64) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ switch (int_info.bits) {
+ else => unreachable,
+ 1...31 => |bits| {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = 32 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.w(), res_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = 32 - 1,
+ })),
+ }
+ try isel.emit(.rbit(res_ra.w(), src_mat.ra.w()));
+ },
+ 32 => try isel.emit(.rbit(res_ra.w(), src_mat.ra.w())),
+ 33...63 => |bits| {
+ switch (int_info.signedness) {
+ .signed => try isel.emit(.sbfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ .unsigned => try isel.emit(.ubfm(res_ra.x(), res_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = 64 - 1,
+ })),
+ }
+ try isel.emit(.rbit(res_ra.x(), src_mat.ra.x()));
+ },
+ 64 => try isel.emit(.rbit(res_ra.x(), src_mat.ra.x())),
+ }
+ try src_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .sqrt, .floor, .ceil, .round, .trunc_float => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const un_op = air.data(air.inst_index).un_op;
+ const ty = isel.air.typeOf(un_op, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ const src_ra = if (need_fcvt) try isel.allocVecReg() else src_mat.ra;
+ defer if (need_fcvt) isel.freeReg(src_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) continue :bits 32 else switch (air_tag) {
+ else => unreachable,
+ .sqrt => .fsqrt(res_ra.h(), src_ra.h()),
+ .floor => .frintm(res_ra.h(), src_ra.h()),
+ .ceil => .frintp(res_ra.h(), src_ra.h()),
+ .round => .frinta(res_ra.h(), src_ra.h()),
+ .trunc_float => .frintz(res_ra.h(), src_ra.h()),
+ },
+ 32 => switch (air_tag) {
+ else => unreachable,
+ .sqrt => .fsqrt(res_ra.s(), src_ra.s()),
+ .floor => .frintm(res_ra.s(), src_ra.s()),
+ .ceil => .frintp(res_ra.s(), src_ra.s()),
+ .round => .frinta(res_ra.s(), src_ra.s()),
+ .trunc_float => .frintz(res_ra.s(), src_ra.s()),
+ },
+ 64 => switch (air_tag) {
+ else => unreachable,
+ .sqrt => .fsqrt(res_ra.d(), src_ra.d()),
+ .floor => .frintm(res_ra.d(), src_ra.d()),
+ .ceil => .frintp(res_ra.d(), src_ra.d()),
+ .round => .frinta(res_ra.d(), src_ra.d()),
+ .trunc_float => .frintz(res_ra.d(), src_ra.d()),
+ },
+ });
+ if (need_fcvt) try isel.emit(.fcvt(src_ra.s(), src_mat.ra.h()));
+ try src_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (air_tag) {
+ else => unreachable,
+ .sqrt => switch (bits) {
+ else => unreachable,
+ 16 => "__sqrth",
+ 32 => "sqrtf",
+ 64 => "sqrt",
+ 80 => "__sqrtx",
+ 128 => "sqrtq",
+ },
+ .floor => switch (bits) {
+ else => unreachable,
+ 16 => "__floorh",
+ 32 => "floorf",
+ 64 => "floor",
+ 80 => "__floorx",
+ 128 => "floorq",
+ },
+ .ceil => switch (bits) {
+ else => unreachable,
+ 16 => "__ceilh",
+ 32 => "ceilf",
+ 64 => "ceil",
+ 80 => "__ceilx",
+ 128 => "ceilq",
+ },
+ .round => switch (bits) {
+ else => unreachable,
+ 16 => "__roundh",
+ 32 => "roundf",
+ 64 => "round",
+ 80 => "__roundx",
+ 128 => "roundq",
+ },
+ .trunc_float => switch (bits) {
+ else => unreachable,
+ 16 => "__trunch",
+ 32 => "truncf",
+ 64 => "trunc",
+ 80 => "__truncx",
+ 128 => "truncq",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(un_op);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .sin, .cos, .tan, .exp, .exp2, .log, .log2, .log10 => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| {
+ defer res_vi.value.deref(isel);
+
+ const un_op = air.data(air.inst_index).un_op;
+ const ty = isel.air.typeOf(un_op, ip);
+ const bits = ty.floatBits(isel.target);
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (air_tag) {
+ else => unreachable,
+ .sin => switch (bits) {
+ else => unreachable,
+ 16 => "__sinh",
+ 32 => "sinf",
+ 64 => "sin",
+ 80 => "__sinx",
+ 128 => "sinq",
+ },
+ .cos => switch (bits) {
+ else => unreachable,
+ 16 => "__cosh",
+ 32 => "cosf",
+ 64 => "cos",
+ 80 => "__cosx",
+ 128 => "cosq",
+ },
+ .tan => switch (bits) {
+ else => unreachable,
+ 16 => "__tanh",
+ 32 => "tanf",
+ 64 => "tan",
+ 80 => "__tanx",
+ 128 => "tanq",
+ },
+ .exp => switch (bits) {
+ else => unreachable,
+ 16 => "__exph",
+ 32 => "expf",
+ 64 => "exp",
+ 80 => "__expx",
+ 128 => "expq",
+ },
+ .exp2 => switch (bits) {
+ else => unreachable,
+ 16 => "__exp2h",
+ 32 => "exp2f",
+ 64 => "exp2",
+ 80 => "__exp2x",
+ 128 => "exp2q",
+ },
+ .log => switch (bits) {
+ else => unreachable,
+ 16 => "__logh",
+ 32 => "logf",
+ 64 => "log",
+ 80 => "__logx",
+ 128 => "logq",
+ },
+ .log2 => switch (bits) {
+ else => unreachable,
+ 16 => "__log2h",
+ 32 => "log2f",
+ 64 => "log2",
+ 80 => "__log2x",
+ 128 => "log2q",
+ },
+ .log10 => switch (bits) {
+ else => unreachable,
+ 16 => "__log10h",
+ 32 => "log10f",
+ 64 => "log10",
+ 80 => "__log10x",
+ 128 => "log10q",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(un_op);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .abs => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ty = ty_op.ty.toType();
+ if (!ty.isRuntimeFloat()) {
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ switch (ty.intInfo(zcu).bits) {
+ 0 => unreachable,
+ 1...32 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.csneg(res_ra.w(), src_mat.ra.w(), src_mat.ra.w(), .pl));
+ try isel.emit(.subs(.wzr, src_mat.ra.w(), .{ .immediate = 0 }));
+ try src_mat.finish(isel);
+ },
+ 33...64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.csneg(res_ra.x(), src_mat.ra.x(), src_mat.ra.x(), .pl));
+ try isel.emit(.subs(.xzr, src_mat.ra.x(), .{ .immediate = 0 }));
+ try src_mat.finish(isel);
+ },
+ 65...128 => {
+ var res_hi64_it = res_vi.value.field(ty, 8, 8);
+ const res_hi64_vi = try res_hi64_it.only(isel);
+ const res_hi64_ra = try res_hi64_vi.?.defReg(isel);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ const res_lo64_ra = try res_lo64_vi.?.defReg(isel);
+ if (res_hi64_ra == null and res_lo64_ra == null) break :unused;
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_vi = try isel.use(ty_op.operand);
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ const lo64_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo64_ra);
+ const hi64_ra, const mask_ra = alloc_ras: {
+ const res_lo64_lock: RegLock = if (res_lo64_ra) |res_ra| isel.tryLockReg(res_ra) else .empty;
+ defer res_lo64_lock.unlock(isel);
+ break :alloc_ras .{ try isel.allocIntReg(), try isel.allocIntReg() };
+ };
+ defer {
+ isel.freeReg(hi64_ra);
+ isel.freeReg(mask_ra);
+ }
+ if (res_hi64_ra) |res_ra| try isel.emit(.sbc(res_ra.x(), hi64_ra.x(), mask_ra.x()));
+ try isel.emit(.subs(
+ if (res_lo64_ra) |res_ra| res_ra.x() else .xzr,
+ lo64_ra.x(),
+ .{ .register = mask_ra.x() },
+ ));
+ if (res_hi64_ra) |_| try isel.emit(.eor(hi64_ra.x(), src_hi64_mat.ra.x(), .{ .register = mask_ra.x() }));
+ try isel.emit(.eor(lo64_ra.x(), src_lo64_mat.ra.x(), .{ .register = mask_ra.x() }));
+ try isel.emit(.sbfm(mask_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 64 - 1,
+ .imms = 64 - 1,
+ }));
+ try src_lo64_mat.finish(isel);
+ try src_hi64_mat.finish(isel);
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) }),
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (isel.target.cpu.has(.aarch64, .fullfp16))
+ .fabs(res_ra.h(), src_mat.ra.h())
+ else
+ .bic(res_ra.@"4h"(), res_ra.@"4h"(), .{ .shifted_immediate = .{
+ .immediate = 0b10000000,
+ .lsl = 8,
+ } }));
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fabs(res_ra.s(), src_mat.ra.s()));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fabs(res_ra.d(), src_mat.ra.d()));
+ try src_mat.finish(isel);
+ },
+ 80 => {
+ const src_vi = try isel.use(ty_op.operand);
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ if (try res_hi16_vi.?.defReg(isel)) |res_hi16_ra| {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.@"and"(res_hi16_ra.w(), src_hi16_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = 15 - 1,
+ } }));
+ try src_hi16_mat.finish(isel);
+ }
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ if (try res_lo64_vi.?.defReg(isel)) |res_lo64_ra| {
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, res_lo64_ra);
+ }
+ },
+ 128 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const neg_zero_ra = try isel.allocVecReg();
+ defer isel.freeReg(neg_zero_ra);
+ try isel.emit(.bic(res_ra.@"16b"(), src_mat.ra.@"16b"(), .{ .register = neg_zero_ra.@"16b"() }));
+ try isel.literals.appendNTimes(gpa, 0, -%isel.literals.items.len % 4);
+ try isel.literal_relocs.append(gpa, .{
+ .label = @intCast(isel.instructions.items.len),
+ });
+ try isel.emit(.ldr(neg_zero_ra.q(), .{
+ .literal = @intCast((isel.instructions.items.len + 1 + isel.literals.items.len) << 2),
+ }));
+ try isel.emitLiteral(&(.{0} ** 15 ++ .{0x80}));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .neg, .neg_optimized => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const un_op = air.data(air.inst_index).un_op;
+ const ty = isel.air.typeOf(un_op, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ if (isel.target.cpu.has(.aarch64, .fullfp16)) {
+ try isel.emit(.fneg(res_ra.h(), src_mat.ra.h()));
+ } else {
+ const neg_zero_ra = try isel.allocVecReg();
+ defer isel.freeReg(neg_zero_ra);
+ try isel.emit(.eor(res_ra.@"8b"(), res_ra.@"8b"(), .{ .register = neg_zero_ra.@"8b"() }));
+ try isel.emit(.movi(neg_zero_ra.@"4h"(), 0b10000000, .{ .lsl = 8 }));
+ }
+ try src_mat.finish(isel);
+ },
+ 32 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fneg(res_ra.s(), src_mat.ra.s()));
+ try src_mat.finish(isel);
+ },
+ 64 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fneg(res_ra.d(), src_mat.ra.d()));
+ try src_mat.finish(isel);
+ },
+ 80 => {
+ const src_vi = try isel.use(un_op);
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ if (try res_hi16_vi.?.defReg(isel)) |res_hi16_ra| {
+ var src_hi16_it = src_vi.field(ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ const src_hi16_mat = try src_hi16_vi.?.matReg(isel);
+ try isel.emit(.eor(res_hi16_ra.w(), src_hi16_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 32 - 15,
+ .imms = 1 - 1,
+ } }));
+ try src_hi16_mat.finish(isel);
+ }
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ if (try res_lo64_vi.?.defReg(isel)) |res_lo64_ra| {
+ var src_lo64_it = src_vi.field(ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, res_lo64_ra);
+ }
+ },
+ 128 => {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(un_op);
+ const src_mat = try src_vi.matReg(isel);
+ const neg_zero_ra = try isel.allocVecReg();
+ defer isel.freeReg(neg_zero_ra);
+ try isel.emit(.eor(res_ra.@"16b"(), src_mat.ra.@"16b"(), .{ .register = neg_zero_ra.@"16b"() }));
+ try isel.literals.appendNTimes(gpa, 0, -%isel.literals.items.len % 4);
+ try isel.literal_relocs.append(gpa, .{
+ .label = @intCast(isel.instructions.items.len),
+ });
+ try isel.emit(.ldr(neg_zero_ra.q(), .{
+ .literal = @intCast((isel.instructions.items.len + 1 + isel.literals.items.len) << 2),
+ }));
+ try isel.emitLiteral(&(.{0} ** 15 ++ .{0x80}));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ var bin_op = air.data(air.inst_index).bin_op;
+ const ty = isel.air.typeOf(bin_op.lhs, ip);
+ if (!ty.isRuntimeFloat()) {
+ const int_info: std.builtin.Type.Int = if (ty.toIntern() == .bool_type)
+ .{ .signedness = .unsigned, .bits = 1 }
+ else if (ty.isAbiInt(zcu))
+ ty.intInfo(zcu)
+ else if (ty.isPtrAtRuntime(zcu))
+ .{ .signedness = .unsigned, .bits = 64 }
+ else
+ return isel.fail("bad {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+ if (int_info.bits > 256) return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(ty) });
+
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ try isel.emit(.csinc(res_ra.w(), .wzr, .wzr, .invert(cond: switch (air_tag) {
+ else => unreachable,
+ .cmp_lt => switch (int_info.signedness) {
+ .signed => .lt,
+ .unsigned => .lo,
+ },
+ .cmp_lte => switch (int_info.bits) {
+ else => unreachable,
+ 1...64 => switch (int_info.signedness) {
+ .signed => .le,
+ .unsigned => .ls,
+ },
+ 65...128 => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_gte;
+ },
+ },
+ .cmp_eq => .eq,
+ .cmp_gte => switch (int_info.signedness) {
+ .signed => .ge,
+ .unsigned => .hs,
+ },
+ .cmp_gt => switch (int_info.bits) {
+ else => unreachable,
+ 1...64 => switch (int_info.signedness) {
+ .signed => .gt,
+ .unsigned => .hi,
+ },
+ 65...128 => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_lt;
+ },
+ },
+ .cmp_neq => .ne,
+ })));
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ var part_offset = lhs_vi.size(isel);
+ while (part_offset > 0) {
+ const part_size = @min(part_offset, 8);
+ part_offset -= part_size;
+ var lhs_part_it = lhs_vi.field(ty, part_offset, part_size);
+ const lhs_part_vi = try lhs_part_it.only(isel);
+ const lhs_part_mat = try lhs_part_vi.?.matReg(isel);
+ var rhs_part_it = rhs_vi.field(ty, part_offset, part_size);
+ const rhs_part_vi = try rhs_part_it.only(isel);
+ const rhs_part_mat = try rhs_part_vi.?.matReg(isel);
+ try isel.emit(switch (part_size) {
+ else => unreachable,
+ 1...4 => switch (part_offset) {
+ 0 => .subs(.wzr, lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ else => switch (air_tag) {
+ else => unreachable,
+ .cmp_lt, .cmp_lte, .cmp_gte, .cmp_gt => .sbcs(
+ .wzr,
+ lhs_part_mat.ra.w(),
+ rhs_part_mat.ra.w(),
+ ),
+ .cmp_eq, .cmp_neq => .ccmp(
+ lhs_part_mat.ra.w(),
+ .{ .register = rhs_part_mat.ra.w() },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .eq,
+ ),
+ },
+ },
+ 5...8 => switch (part_offset) {
+ 0 => .subs(.xzr, lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ else => switch (air_tag) {
+ else => unreachable,
+ .cmp_lt, .cmp_lte, .cmp_gte, .cmp_gt => .sbcs(
+ .xzr,
+ lhs_part_mat.ra.x(),
+ rhs_part_mat.ra.x(),
+ ),
+ .cmp_eq, .cmp_neq => .ccmp(
+ lhs_part_mat.ra.x(),
+ .{ .register = rhs_part_mat.ra.x() },
+ .{ .n = false, .z = false, .c = false, .v = false },
+ .eq,
+ ),
+ },
+ },
+ });
+ try rhs_part_mat.finish(isel);
+ try lhs_part_mat.finish(isel);
+ }
+ } else switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ try isel.emit(.csinc(res_ra.w(), .wzr, .wzr, .invert(switch (air_tag) {
+ else => unreachable,
+ .cmp_lt => .lo,
+ .cmp_lte => .ls,
+ .cmp_eq => .eq,
+ .cmp_gte => .ge,
+ .cmp_gt => .gt,
+ .cmp_neq => .ne,
+ })));
+
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fcmp(lhs_ra.h(), .{ .register = rhs_ra.h() }),
+ 32 => .fcmp(lhs_ra.s(), .{ .register = rhs_ra.s() }),
+ 64 => .fcmp(lhs_ra.d(), .{ .register = rhs_ra.d() }),
+ });
+ if (need_fcvt) {
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.returnFill(isel, .r0);
+ try isel.emit(.csinc(res_ra.w(), .wzr, .wzr, .invert(cond: switch (air_tag) {
+ else => unreachable,
+ .cmp_lt => .lt,
+ .cmp_lte => .le,
+ .cmp_eq => .eq,
+ .cmp_gte => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_lte;
+ },
+ .cmp_gt => {
+ std.mem.swap(Air.Inst.Ref, &bin_op.lhs, &bin_op.rhs);
+ continue :cond .cmp_lt;
+ },
+ .cmp_neq => .ne,
+ })));
+ try isel.emit(.subs(.wzr, .w0, .{ .immediate = 0 }));
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__cmphf2",
+ 32 => "__cmpsf2",
+ 64 => "__cmpdf2",
+ 80 => "__cmpxf2",
+ 128 => "__cmptf2",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ 80 => {
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .cond_br => {
+ const pl_op = air.data(air.inst_index).pl_op;
+ const extra = isel.air.extraData(Air.CondBr, pl_op.payload);
+
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end + extra.data.then_body_len ..][0..extra.data.else_body_len]));
+ const else_label = isel.instructions.items.len;
+ const else_live_registers = isel.live_registers;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.then_body_len]));
+ try isel.merge(&else_live_registers, .{});
+
+ const cond_vi = try isel.use(pl_op.operand);
+ const cond_mat = try cond_vi.matReg(isel);
+ try isel.emit(.tbz(
+ cond_mat.ra.x(),
+ 0,
+ @intCast((isel.instructions.items.len + 1 - else_label) << 2),
+ ));
+ try cond_mat.finish(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .switch_br => {
+ const switch_br = isel.air.unwrapSwitch(air.inst_index);
+ const cond_ty = isel.air.typeOf(switch_br.operand, ip);
+ const cond_int_info: std.builtin.Type.Int = if (cond_ty.toIntern() == .bool_type)
+ .{ .signedness = .unsigned, .bits = 1 }
+ else if (cond_ty.isAbiInt(zcu))
+ cond_ty.intInfo(zcu)
+ else
+ return isel.fail("bad switch cond {f}", .{isel.fmtType(cond_ty)});
+
+ var final_case = true;
+ if (switch_br.else_body_len > 0) {
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |_| {}
+ try isel.body(cases_it.elseBody());
+ assert(final_case);
+ final_case = false;
+ }
+ const zero_reg: Register = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => .wzr,
+ 33...64 => .xzr,
+ };
+ var cond_mat: ?Value.Materialize = null;
+ var cond_reg: Register = undefined;
+ var temp_reg: Register = undefined;
+ var cases_it = switch_br.iterateCases();
+ while (cases_it.next()) |case| {
+ const next_label = isel.instructions.items.len;
+ const next_live_registers = isel.live_registers;
+ try isel.body(case.body);
+ if (final_case) {
+ final_case = false;
+ continue;
+ }
+ try isel.merge(&next_live_registers, .{});
+ if (cond_mat == null) {
+ var cond_vi = try isel.use(switch_br.operand);
+ cond_mat = try cond_vi.matReg(isel);
+ const temp_ra = try isel.allocIntReg();
+ cond_reg, temp_reg = switch (cond_int_info.bits) {
+ else => unreachable,
+ 1...32 => .{ cond_mat.?.ra.w(), temp_ra.w() },
+ 33...64 => .{ cond_mat.?.ra.x(), temp_ra.x() },
+ };
+ }
+ if (case.ranges.len == 0 and case.items.len == 1 and Constant.fromInterned(
+ case.items[0].toInterned().?,
+ ).orderAgainstZero(zcu).compare(.eq)) {
+ try isel.emit(.cbnz(
+ cond_reg,
+ @intCast((isel.instructions.items.len + 1 - next_label) << 2),
+ ));
+ continue;
+ }
+ try isel.emit(.@"b."(
+ .invert(switch (case.ranges.len) {
+ 0 => .eq,
+ else => .ls,
+ }),
+ @intCast((isel.instructions.items.len + 1 - next_label) << 2),
+ ));
+ var case_range_index = case.ranges.len;
+ while (case_range_index > 0) {
+ case_range_index -= 1;
+
+ const low_val: Constant = .fromInterned(case.ranges[case_range_index][0].toInterned().?);
+ var low_bigint_space: Constant.BigIntSpace = undefined;
+ const low_bigint = low_val.toBigInt(&low_bigint_space, zcu);
+ const low_int: i64 = if (low_bigint.positive) @bitCast(
+ low_bigint.toInt(u64) catch
+ return isel.fail("too big case range start: {f}", .{isel.fmtConstant(low_val)}),
+ ) else low_bigint.toInt(i64) catch
+ return isel.fail("too big case range start: {f}", .{isel.fmtConstant(low_val)});
+
+ const high_val: Constant = .fromInterned(case.ranges[case_range_index][1].toInterned().?);
+ var high_bigint_space: Constant.BigIntSpace = undefined;
+ const high_bigint = high_val.toBigInt(&high_bigint_space, zcu);
+ const high_int: i64 = if (high_bigint.positive) @bitCast(
+ high_bigint.toInt(u64) catch
+ return isel.fail("too big case range end: {f}", .{isel.fmtConstant(high_val)}),
+ ) else high_bigint.toInt(i64) catch
+ return isel.fail("too big case range end: {f}", .{isel.fmtConstant(high_val)});
+
+ const delta_int = high_int -% low_int;
+ if (case_range_index > 0) {
+ return isel.fail("case range", .{});
+ } else if (case.items.len > 0) {
+ return isel.fail("case range", .{});
+ } else {
+ const adjusted_reg = switch (low_int) {
+ 0 => cond_reg,
+ else => temp_reg,
+ };
+
+ if (std.math.cast(u12, delta_int)) |pos_imm| try isel.emit(.subs(
+ zero_reg,
+ adjusted_reg,
+ .{ .immediate = pos_imm },
+ )) else if (std.math.cast(u12, -delta_int)) |neg_imm| try isel.emit(.adds(
+ zero_reg,
+ adjusted_reg,
+ .{ .immediate = neg_imm },
+ )) else if (if (@as(i12, @truncate(delta_int)) == 0)
+ std.math.cast(u12, delta_int >> 12)
+ else
+ null) |pos_imm_lsr_12| try isel.emit(.subs(
+ zero_reg,
+ adjusted_reg,
+ .{ .shifted_immediate = .{ .immediate = pos_imm_lsr_12, .lsl = .@"12" } },
+ )) else if (if (@as(i12, @truncate(-delta_int)) == 0)
+ std.math.cast(u12, -delta_int >> 12)
+ else
+ null) |neg_imm_lsr_12| try isel.emit(.adds(
+ zero_reg,
+ adjusted_reg,
+ .{ .shifted_immediate = .{ .immediate = neg_imm_lsr_12, .lsl = .@"12" } },
+ )) else {
+ try isel.movImmediate(temp_reg, @bitCast(delta_int));
+ try isel.emit(.subs(zero_reg, adjusted_reg, .{ .register = temp_reg }));
+ }
+
+ switch (low_int) {
+ 0 => {},
+ else => {
+ if (std.math.cast(u12, low_int)) |pos_imm| try isel.emit(.sub(
+ adjusted_reg,
+ cond_reg,
+ .{ .immediate = pos_imm },
+ )) else if (std.math.cast(u12, -low_int)) |neg_imm| try isel.emit(.add(
+ adjusted_reg,
+ cond_reg,
+ .{ .immediate = neg_imm },
+ )) else if (if (@as(i12, @truncate(low_int)) == 0)
+ std.math.cast(u12, low_int >> 12)
+ else
+ null) |pos_imm_lsr_12| try isel.emit(.sub(
+ adjusted_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = pos_imm_lsr_12, .lsl = .@"12" } },
+ )) else if (if (@as(i12, @truncate(-low_int)) == 0)
+ std.math.cast(u12, -low_int >> 12)
+ else
+ null) |neg_imm_lsr_12| try isel.emit(.add(
+ adjusted_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = neg_imm_lsr_12, .lsl = .@"12" } },
+ )) else {
+ try isel.movImmediate(temp_reg, @bitCast(low_int));
+ try isel.emit(.subs(adjusted_reg, cond_reg, .{ .register = temp_reg }));
+ }
+ },
+ }
+ }
+ }
+ var case_item_index = case.items.len;
+ while (case_item_index > 0) {
+ case_item_index -= 1;
+
+ const item_val: Constant = .fromInterned(case.items[case_item_index].toInterned().?);
+ var item_bigint_space: Constant.BigIntSpace = undefined;
+ const item_bigint = item_val.toBigInt(&item_bigint_space, zcu);
+ const item_int: i64 = if (item_bigint.positive) @bitCast(
+ item_bigint.toInt(u64) catch
+ return isel.fail("too big case item: {f}", .{isel.fmtConstant(item_val)}),
+ ) else item_bigint.toInt(i64) catch
+ return isel.fail("too big case item: {f}", .{isel.fmtConstant(item_val)});
+
+ if (case_item_index > 0) {
+ if (std.math.cast(u5, item_int)) |pos_imm| try isel.emit(.ccmp(
+ cond_reg,
+ .{ .immediate = pos_imm },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ .ne,
+ )) else if (std.math.cast(u5, -item_int)) |neg_imm| try isel.emit(.ccmn(
+ cond_reg,
+ .{ .immediate = neg_imm },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ .ne,
+ )) else {
+ try isel.movImmediate(temp_reg, @bitCast(item_int));
+ try isel.emit(.ccmp(
+ cond_reg,
+ .{ .register = temp_reg },
+ .{ .n = false, .z = true, .c = false, .v = false },
+ .ne,
+ ));
+ }
+ } else {
+ if (std.math.cast(u12, item_int)) |pos_imm| try isel.emit(.subs(
+ zero_reg,
+ cond_reg,
+ .{ .immediate = pos_imm },
+ )) else if (std.math.cast(u12, -item_int)) |neg_imm| try isel.emit(.adds(
+ zero_reg,
+ cond_reg,
+ .{ .immediate = neg_imm },
+ )) else if (if (@as(i12, @truncate(item_int)) == 0)
+ std.math.cast(u12, item_int >> 12)
+ else
+ null) |pos_imm_lsr_12| try isel.emit(.subs(
+ zero_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = pos_imm_lsr_12, .lsl = .@"12" } },
+ )) else if (if (@as(i12, @truncate(-item_int)) == 0)
+ std.math.cast(u12, -item_int >> 12)
+ else
+ null) |neg_imm_lsr_12| try isel.emit(.adds(
+ zero_reg,
+ cond_reg,
+ .{ .shifted_immediate = .{ .immediate = neg_imm_lsr_12, .lsl = .@"12" } },
+ )) else {
+ try isel.movImmediate(temp_reg, @bitCast(item_int));
+ try isel.emit(.subs(zero_reg, cond_reg, .{ .register = temp_reg }));
+ }
+ }
+ }
+ }
+ if (cond_mat) |mat| {
+ try mat.finish(isel);
+ isel.freeReg(temp_reg.alias);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .@"try", .try_cold => {
+ const pl_op = air.data(air.inst_index).pl_op;
+ const extra = isel.air.extraData(Air.Try, pl_op.payload);
+ const error_union_ty = isel.air.typeOf(pl_op.operand, ip);
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+
+ const error_union_vi = try isel.use(pl_op.operand);
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| {
+ defer payload_vi.value.deref(isel);
+
+ var payload_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionPayloadOffset(payload_ty, zcu),
+ payload_vi.value.size(isel),
+ );
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_vi.value.copy(isel, payload_ty, payload_part_vi.?);
+ }
+
+ const cont_label = isel.instructions.items.len;
+ const cont_live_registers = isel.live_registers;
+ try isel.body(@ptrCast(isel.air.extra.items[extra.end..][0..extra.data.body_len]));
+ try isel.merge(&cont_live_registers, .{});
+
+ var error_set_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionErrorOffset(payload_ty, zcu),
+ ZigType.fromInterned(error_union_info.error_set_type).abiSize(zcu),
+ );
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ const error_set_part_mat = try error_set_part_vi.?.matReg(isel);
+ try isel.emit(.cbz(
+ switch (error_set_part_vi.?.size(isel)) {
+ else => unreachable,
+ 1...4 => error_set_part_mat.ra.w(),
+ 5...8 => error_set_part_mat.ra.x(),
+ },
+ @intCast((isel.instructions.items.len + 1 - cont_label) << 2),
+ ));
+ try error_set_part_mat.finish(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .dbg_stmt => {
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .dbg_empty_stmt => {
+ try isel.emit(.nop());
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .dbg_var_ptr, .dbg_var_val, .dbg_arg_inline => {
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .is_null, .is_non_null => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |is_vi| unused: {
+ defer is_vi.value.deref(isel);
+ const is_ra = try is_vi.value.defReg(isel) orelse break :unused;
+
+ const un_op = air.data(air.inst_index).un_op;
+ const opt_ty = isel.air.typeOf(un_op, ip);
+ const payload_ty = opt_ty.optionalChild(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+ const has_value_offset, const has_value_size = if (!opt_ty.optionalReprIsPayload(zcu))
+ .{ payload_size, 1 }
+ else if (payload_ty.isSlice(zcu))
+ .{ 0, 8 }
+ else
+ .{ 0, payload_size };
+
+ try isel.emit(.csinc(is_ra.w(), .wzr, .wzr, .invert(switch (air_tag) {
+ else => unreachable,
+ .is_null => .eq,
+ .is_non_null => .ne,
+ })));
+ const opt_vi = try isel.use(un_op);
+ var has_value_part_it = opt_vi.field(opt_ty, has_value_offset, has_value_size);
+ const has_value_part_vi = try has_value_part_it.only(isel);
+ const has_value_part_mat = try has_value_part_vi.?.matReg(isel);
+ try isel.emit(switch (has_value_size) {
+ else => unreachable,
+ 1...4 => .subs(.wzr, has_value_part_mat.ra.w(), .{ .immediate = 0 }),
+ 5...8 => .subs(.xzr, has_value_part_mat.ra.x(), .{ .immediate = 0 }),
+ });
+ try has_value_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .is_err, .is_non_err => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |is_vi| unused: {
+ defer is_vi.value.deref(isel);
+ const is_ra = try is_vi.value.defReg(isel) orelse break :unused;
+
+ const un_op = air.data(air.inst_index).un_op;
+ const error_union_ty = isel.air.typeOf(un_op, ip);
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_info.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+
+ try isel.emit(.csinc(is_ra.w(), .wzr, .wzr, .invert(switch (air_tag) {
+ else => unreachable,
+ .is_err => .ne,
+ .is_non_err => .eq,
+ })));
+ const error_union_vi = try isel.use(un_op);
+ var error_set_part_it = error_union_vi.field(error_union_ty, error_set_offset, error_set_size);
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ const error_set_part_mat = try error_set_part_vi.?.matReg(isel);
+ try isel.emit(.ands(.wzr, error_set_part_mat.ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(8 * error_set_size - 1),
+ } }));
+ try error_set_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.load`: load a value through a pointer. Packed (bit-offset)
+ // loads are not implemented yet. Non-volatile loads whose result is
+ // unused are elided entirely; volatile loads force a use of the result.
+ .load => {
+ const ty_op = air.data(air.inst_index).ty_op;
+ const ptr_ty = isel.air.typeOf(ty_op.operand, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed load", .{});
+
+ // Volatile loads must happen even if the result is otherwise dead.
+ if (ptr_info.flags.is_volatile) _ = try isel.use(air.inst_index.toRef());
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ switch (dst_vi.value.size(isel)) {
+ 0 => unreachable,
+ // Small enough to live in registers: load directly.
+ 1...Value.max_parts => {
+ const ptr_vi = try isel.use(ty_op.operand);
+ const ptr_mat = try ptr_vi.matReg(isel);
+ _ = try dst_vi.value.load(isel, ty_op.ty.toType(), ptr_mat.ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ });
+ try ptr_mat.finish(isel);
+ },
+ // Too large for registers: copy into the destination's memory
+ // via memcpy(r0 = dst address, r1 = src pointer, x2 = size).
+ else => |size| {
+ try dst_vi.value.defAddr(isel, .fromInterned(ptr_info.child), null, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const ptr_vi = try isel.use(ty_op.operand);
+ try isel.movImmediate(.x2, size);
+ try call.paramLiveOut(isel, ptr_vi, .r1);
+ try call.paramAddress(isel, dst_vi.value, .r0);
+ try call.finishParams(isel);
+ },
+ }
+ }
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.ret` / `.ret_safe`: branch to the function epilogue (the main
+ // block) and route the returned value into its ABI location(s).
+ .ret, .ret_safe => {
+ assert(isel.blocks.keys()[0] == Block.main);
+ try isel.blocks.values()[0].branch(isel);
+ if (isel.live_values.get(Block.main)) |ret_vi| {
+ const un_op = air.data(air.inst_index).un_op;
+ const src_vi = try isel.use(un_op);
+ switch (ret_vi.parent(isel)) {
+ // Value returned in register(s).
+ .unallocated, .stack_slot => if (ret_vi.hint(isel)) |ret_ra| {
+ // Single-register return: tie the source to that register.
+ try src_vi.liveOut(isel, ret_ra);
+ } else {
+ var ret_part_it = ret_vi.parts(isel);
+ var src_part_it = src_vi.parts(isel);
+ // If the source is still monolithic, split it into parts
+ // mirroring the return value's part layout (propagating
+ // signedness and vector-ness), then re-walk both.
+ if (src_part_it.only()) |_| {
+ try isel.values.ensureUnusedCapacity(gpa, ret_part_it.remaining);
+ src_vi.setParts(isel, ret_part_it.remaining);
+ while (ret_part_it.next()) |ret_part_vi| {
+ const src_part_vi = src_vi.addPart(
+ isel,
+ ret_part_vi.get(isel).offset_from_parent,
+ ret_part_vi.size(isel),
+ );
+ switch (ret_part_vi.signedness(isel)) {
+ .signed => src_part_vi.setSignedness(isel, .signed),
+ .unsigned => {},
+ }
+ if (ret_part_vi.isVector(isel)) src_part_vi.setIsVector(isel);
+ }
+ ret_part_it = ret_vi.parts(isel);
+ src_part_it = src_vi.parts(isel);
+ }
+ // Route each source part to the matching return register.
+ while (ret_part_it.next()) |ret_part_vi| {
+ const src_part_vi = src_part_it.next().?;
+ assert(ret_part_vi.get(isel).offset_from_parent == src_part_vi.get(isel).offset_from_parent);
+ assert(ret_part_vi.size(isel) == src_part_vi.size(isel));
+ try src_part_vi.liveOut(isel, ret_part_vi.hint(isel).?);
+ }
+ },
+ .value, .constant => unreachable,
+ // Indirect (sret-style) return: store through the result pointer.
+ .address => |address_vi| {
+ const ptr_mat = try address_vi.matReg(isel);
+ try src_vi.store(isel, isel.air.typeOf(un_op, ip), ptr_mat.ra, .{});
+ try ptr_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.ret_load`: return the value stored behind a result pointer,
+ // loading it directly into the return register(s). Packed pointers are
+ // not implemented yet.
+ .ret_load => {
+ const un_op = air.data(air.inst_index).un_op;
+ const ptr_ty = isel.air.typeOf(un_op, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed load", .{});
+
+ assert(isel.blocks.keys()[0] == Block.main);
+ try isel.blocks.values()[0].branch(isel);
+ if (isel.live_values.get(Block.main)) |ret_vi| switch (ret_vi.parent(isel)) {
+ .unallocated, .stack_slot => {
+ // Pin every return part to its hinted ABI register, then load.
+ var ret_part_it: Value.PartIterator = if (ret_vi.hint(isel)) |_| .initOne(ret_vi) else ret_vi.parts(isel);
+ while (ret_part_it.next()) |ret_part_vi| try ret_part_vi.liveOut(isel, ret_part_vi.hint(isel).?);
+ const ptr_vi = try isel.use(un_op);
+ const ptr_mat = try ptr_vi.matReg(isel);
+ _ = try ret_vi.load(isel, .fromInterned(ptr_info.child), ptr_mat.ra, .{});
+ try ptr_mat.finish(isel);
+ },
+ .value, .constant => unreachable,
+ // Indirect return: the callee already wrote through the result
+ // pointer, so nothing to do here.
+ .address => {},
+ };
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.store` family: store a value through a pointer. Packed stores
+ // are not implemented; storing `undef` is elided. Small non-union
+ // values store directly; everything else falls back to memcpy.
+ .store, .store_safe, .atomic_store_unordered => {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ptr_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed store", .{});
+ // Storing a comptime-known undefined value is a no-op.
+ if (bin_op.rhs.toInterned()) |rhs_val| if (ip.isUndef(rhs_val)) {
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ break :air_tag;
+ };
+
+ const src_vi = try isel.use(bin_op.rhs);
+ const size = src_vi.size(isel);
+ // NOTE(review): unions always take the memcpy path regardless of
+ // size — presumably because their part layout is unreliable; confirm.
+ if (ZigType.fromInterned(ptr_info.child).zigTypeTag(zcu) != .@"union") switch (size) {
+ 0 => unreachable,
+ 1...Value.max_parts => {
+ const ptr_vi = try isel.use(bin_op.lhs);
+ const ptr_mat = try ptr_vi.matReg(isel);
+ try src_vi.store(isel, isel.air.typeOf(bin_op.rhs, ip), ptr_mat.ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ });
+ try ptr_mat.finish(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ break :air_tag;
+ },
+ else => {},
+ };
+ // Fallback: memcpy(r0 = dst pointer, r1 = src address, x2 = size).
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const ptr_vi = try isel.use(bin_op.lhs);
+ try isel.movImmediate(.x2, size);
+ try call.paramAddress(isel, src_vi, .r1);
+ try call.paramLiveOut(isel, ptr_vi, .r0);
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.unreach`: no code; just advance to the next instruction.
+ .unreach => if (air.next()) |next_air_tag| continue :air_tag next_air_tag,
+ // AIR `.fptrunc` / `.fpext`: float width conversion. When both widths
+ // are 16/32/64 a single `fcvt` suffices; any f80/f128 involvement is
+ // routed through the matching compiler-rt `__trunc*`/`__extend*` call.
+ .fptrunc, .fpext => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_bits = dst_ty.floatBits(isel.target);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_bits = src_ty.floatBits(isel.target);
+ assert(dst_bits != src_bits);
+ switch (@max(dst_bits, src_bits)) {
+ else => unreachable,
+ // Both operands are hardware float sizes: one fcvt.
+ 16, 32, 64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.fcvt(switch (dst_bits) {
+ else => unreachable,
+ 16 => dst_ra.h(),
+ 32 => dst_ra.s(),
+ 64 => dst_ra.d(),
+ }, switch (src_bits) {
+ else => unreachable,
+ 16 => src_mat.ra.h(),
+ 32 => src_mat.ra.s(),
+ 64 => src_mat.ra.d(),
+ }));
+ try src_mat.finish(isel);
+ },
+ // f80/f128 involved: libcall. Note f80 is passed/returned in
+ // the x0/x1 integer pair (low 64 bits in r0, high 16 in r1),
+ // while the other widths use v0.
+ 80, 128 => {
+ try call.prepareReturn(isel);
+ switch (dst_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, dst_vi.value, .v0),
+ 80 => {
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ try call.returnLiveIn(isel, dst_hi16_vi.?, .r1);
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ try call.returnLiveIn(isel, dst_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ // Select the compiler-rt conversion routine by (dst, src) width.
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (dst_bits) {
+ else => unreachable,
+ 16 => switch (src_bits) {
+ else => unreachable,
+ 32 => "__truncsfhf2",
+ 64 => "__truncdfhf2",
+ 80 => "__truncxfhf2",
+ 128 => "__trunctfhf2",
+ },
+ 32 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhfsf2",
+ 64 => "__truncdfsf2",
+ 80 => "__truncxfsf2",
+ 128 => "__trunctfsf2",
+ },
+ 64 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhfdf2",
+ 32 => "__extendsfdf2",
+ 80 => "__truncxfdf2",
+ 128 => "__trunctfdf2",
+ },
+ 80 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhfxf2",
+ 32 => "__extendsfxf2",
+ 64 => "__extenddfxf2",
+ 128 => "__trunctfxf2",
+ },
+ 128 => switch (src_bits) {
+ else => unreachable,
+ 16 => "__extendhftf2",
+ 32 => "__extendsftf2",
+ 64 => "__extenddftf2",
+ 80 => "__extendxftf2",
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ switch (src_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.intcast`: integer widening/narrowing between ABI sizes.
+ // Sign-extension is only needed when both sides are signed; otherwise a
+ // plain register move (which zeroes the upper bits) suffices.
+ .intcast => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const src_int_info = src_ty.intInfo(zcu);
+ const can_be_negative = dst_int_info.signedness == .signed and
+ src_int_info.signedness == .signed;
+ // Same storage class (same register/part width): no code needed,
+ // just forward the operand.
+ if ((dst_int_info.bits <= 8 and src_int_info.bits <= 8) or
+ (dst_int_info.bits > 8 and dst_int_info.bits <= 16 and
+ src_int_info.bits > 8 and src_int_info.bits <= 16) or
+ (dst_int_info.bits > 16 and dst_int_info.bits <= 32 and
+ src_int_info.bits > 16 and src_int_info.bits <= 32) or
+ (dst_int_info.bits > 32 and dst_int_info.bits <= 64 and
+ src_int_info.bits > 32 and src_int_info.bits <= 64) or
+ (dst_int_info.bits > 64 and src_int_info.bits > 64 and
+ (dst_int_info.bits - 1) / 128 == (src_int_info.bits - 1) / 128))
+ {
+ try dst_vi.value.move(isel, ty_op.operand);
+ } else if (dst_int_info.bits <= 32 and src_int_info.bits <= 64) {
+ // Narrowing into a 32-bit register: `orr wD, wzr, wS` (a mov)
+ // implicitly discards/zeroes the upper bits.
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ } else if (dst_int_info.bits <= 64 and src_int_info.bits <= 32) {
+ // Widening to 64-bit: sign-extend with sbfm when both signed,
+ // otherwise a 32-bit mov zero-extends.
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (can_be_negative) .sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(src_int_info.bits - 1),
+ }) else .orr(dst_ra.w(), .wzr, .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ } else if (dst_int_info.bits <= 32 and src_int_info.bits <= 128) {
+ // Narrowing a >64-bit value to <=32 bits: only the low 64-bit
+ // part matters.
+ assert(src_int_info.bits > 64);
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ const src_lo64_mat = try src_lo64_vi.?.matReg(isel);
+ try isel.emit(.orr(dst_ra.w(), .wzr, .{ .register = src_lo64_mat.ra.w() }));
+ try src_lo64_mat.finish(isel);
+ } else if (dst_int_info.bits <= 64 and src_int_info.bits <= 128) {
+ // >64-bit source to a 33..64-bit destination: copy the low half.
+ assert(dst_int_info.bits > 32 and src_int_info.bits > 64);
+ const src_vi = try isel.use(ty_op.operand);
+
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try dst_vi.value.copy(isel, dst_ty, src_lo64_vi.?);
+ } else if (dst_int_info.bits <= 128 and src_int_info.bits <= 64) {
+ // Widening a <=64-bit source to a 65..128-bit destination:
+ // fill the low half, then the high half with zeros or the
+ // replicated sign bit.
+ assert(dst_int_info.bits > 64);
+ const src_vi = try isel.use(ty_op.operand);
+
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (src_int_info.bits <= 32) unused_lo64: {
+ const dst_lo64_ra = try dst_lo64_vi.?.defReg(isel) orelse break :unused_lo64;
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(if (can_be_negative) .sbfm(dst_lo64_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(src_int_info.bits - 1),
+ }) else .orr(dst_lo64_ra.w(), .wzr, .{ .register = src_mat.ra.w() }));
+ try src_mat.finish(isel);
+ } else try dst_lo64_vi.?.copy(isel, src_ty, src_vi);
+
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ const dst_hi64_ra = try dst_hi64_vi.?.defReg(isel);
+ if (dst_hi64_ra) |dst_ra| switch (can_be_negative) {
+ // Unsigned: high half is zero.
+ false => try isel.emit(.orr(dst_ra.x(), .xzr, .{ .register = .xzr })),
+ // Signed: broadcast the source sign bit across the high half
+ // (sbfm with immr == imms == sign-bit index).
+ true => {
+ const src_mat = try src_vi.matReg(isel);
+ try isel.emit(.sbfm(dst_ra.x(), src_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(src_int_info.bits - 1),
+ .imms = @intCast(src_int_info.bits - 1),
+ }));
+ try src_mat.finish(isel);
+ },
+ };
+ } else return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.trunc`: truncate an integer to fewer bits, re-extending the
+ // result to its storage width (sbfm for signed, ubfm for unsigned;
+ // exact register widths use a plain mov).
+ .trunc => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!dst_ty.isAbiInt(zcu) or !src_ty.isAbiInt(zcu)) return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ const dst_int_info = dst_ty.intInfo(zcu);
+ switch (dst_int_info.bits) {
+ 0 => unreachable,
+ // Destination fits one register: bitfield-extract the low
+ // dst_bits from the source's low 64-bit part.
+ 1...64 => |dst_bits| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ var src_part_it = src_vi.field(src_ty, 0, @min(src_vi.size(isel), 8));
+ const src_part_vi = try src_part_it.only(isel);
+ const src_part_mat = try src_part_vi.?.matReg(isel);
+ try isel.emit(switch (dst_bits) {
+ else => unreachable,
+ 1...31 => |bits| switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.w(), src_part_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.w(), src_part_mat.ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ },
+ // Exactly 32/64 bits: a register move already truncates.
+ 32 => .orr(dst_ra.w(), .wzr, .{ .register = src_part_mat.ra.w() }),
+ 33...63 => |bits| switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_ra.x(), src_part_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(dst_ra.x(), src_part_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ },
+ 64 => .orr(dst_ra.x(), .xzr, .{ .register = src_part_mat.ra.x() }),
+ });
+ try src_part_mat.finish(isel);
+ },
+ // 65..128-bit destination from a 65..128-bit source: extend
+ // the high half, forward the low half unchanged.
+ 65...128 => |dst_bits| switch (src_ty.intInfo(zcu).bits) {
+ 0 => unreachable,
+ 65...128 => {
+ const src_vi = try isel.use(ty_op.operand);
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ if (try dst_hi64_vi.?.defReg(isel)) |dst_hi64_ra| {
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ const src_hi64_mat = try src_hi64_vi.?.matReg(isel);
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .sbfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ .unsigned => .ubfm(dst_hi64_ra.x(), src_hi64_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(dst_bits - 64 - 1),
+ }),
+ });
+ try src_hi64_mat.finish(isel);
+ }
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ if (try dst_lo64_vi.?.defReg(isel)) |dst_lo64_ra| {
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try src_lo64_vi.?.liveOut(isel, dst_lo64_ra);
+ }
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.optional_payload_ptr`: the payload lives at offset 0 of the
+ // optional, so the payload pointer equals the optional pointer.
+ .optional_payload_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
+ defer dst_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ try dst_vi.value.move(isel, ty_op.operand);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.optional_payload`: extract the payload from an optional value.
+ .optional_payload => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| unused: {
+ defer payload_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const opt_ty = isel.air.typeOf(ty_op.operand, ip);
+ // Pointer-like optionals are represented by the payload itself.
+ if (opt_ty.optionalReprIsPayload(zcu)) {
+ try payload_vi.value.move(isel, ty_op.operand);
+ break :unused;
+ }
+
+ // Otherwise the payload occupies the leading bytes of the optional.
+ const opt_vi = try isel.use(ty_op.operand);
+ var payload_part_it = opt_vi.field(opt_ty, 0, payload_vi.value.size(isel));
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_vi.value.copy(isel, ty_op.ty.toType(), payload_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.wrap_optional`: build an optional from a payload by filling the
+ // payload bytes and setting the trailing has_value byte to 1.
+ .wrap_optional => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |opt_vi| unused: {
+ defer opt_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ if (ty_op.ty.toType().optionalReprIsPayload(zcu)) {
+ try opt_vi.value.move(isel, ty_op.operand);
+ break :unused;
+ }
+
+ const payload_size = isel.air.typeOf(ty_op.operand, ip).abiSize(zcu);
+ var payload_part_it = opt_vi.value.field(ty_op.ty.toType(), 0, payload_size);
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_part_vi.?.move(isel, ty_op.operand);
+ var has_value_part_it = opt_vi.value.field(ty_op.ty.toType(), payload_size, 1);
+ const has_value_part_vi = try has_value_part_it.only(isel);
+ const has_value_part_ra = try has_value_part_vi.?.defReg(isel) orelse break :unused;
+ // has_value := 1
+ try isel.emit(.movz(has_value_part_ra.w(), 1, .{ .lsl = .@"0" }));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.unwrap_errunion_payload`: copy the payload field out of an
+ // error-union value.
+ .unwrap_errunion_payload => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |payload_vi| {
+ defer payload_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = isel.air.typeOf(ty_op.operand, ip);
+
+ const error_union_vi = try isel.use(ty_op.operand);
+ var payload_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionPayloadOffset(ty_op.ty.toType(), zcu),
+ payload_vi.value.size(isel),
+ );
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_vi.value.copy(isel, ty_op.ty.toType(), payload_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.unwrap_errunion_err`: copy the error-set field out of an
+ // error-union value.
+ .unwrap_errunion_err => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_set_vi| {
+ defer error_set_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = isel.air.typeOf(ty_op.operand, ip);
+
+ const error_union_vi = try isel.use(ty_op.operand);
+ var error_set_part_it = error_union_vi.field(
+ error_union_ty,
+ codegen.errUnionErrorOffset(error_union_ty.errorUnionPayload(zcu), zcu),
+ error_set_vi.value.size(isel),
+ );
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ try error_set_vi.value.copy(isel, ty_op.ty.toType(), error_set_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.wrap_errunion_payload`: build an error union from a payload;
+ // the error-set field is zeroed (zero == no error).
+ .wrap_errunion_payload => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_union_vi| {
+ defer error_union_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = ty_op.ty.toType();
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_info.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+
+ var payload_part_it = error_union_vi.value.field(error_union_ty, payload_offset, payload_size);
+ const payload_part_vi = try payload_part_it.only(isel);
+ try payload_part_vi.?.move(isel, ty_op.operand);
+ var error_set_part_it = error_union_vi.value.field(error_union_ty, error_set_offset, error_set_size);
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ // error := 0 via a mov from the zero register.
+ if (try error_set_part_vi.?.defReg(isel)) |error_set_part_ra| try isel.emit(switch (error_set_size) {
+ else => unreachable,
+ 1...4 => .orr(error_set_part_ra.w(), .wzr, .{ .register = .wzr }),
+ 5...8 => .orr(error_set_part_ra.x(), .xzr, .{ .register = .xzr }),
+ });
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.wrap_errunion_err`: build an error union from an error value;
+ // the payload field is filled with a recognizable pattern.
+ .wrap_errunion_err => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |error_union_vi| {
+ defer error_union_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const error_union_ty = ty_op.ty.toType();
+ const error_union_info = ip.indexToKey(error_union_ty.toIntern()).error_union_type;
+ const error_set_ty: ZigType = .fromInterned(error_union_info.error_set_type);
+ const payload_ty: ZigType = .fromInterned(error_union_info.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const error_set_size = error_set_ty.abiSize(zcu);
+ const payload_size = payload_ty.abiSize(zcu);
+
+ if (payload_size > 0) {
+ var payload_part_it = error_union_vi.value.field(error_union_ty, payload_offset, payload_size);
+ const payload_part_vi = try payload_part_it.only(isel);
+ // NOTE(review): this logical immediate (esize-2 element,
+ // rotated) appears to encode the alternating 0xAA.. undef
+ // fill pattern — confirm against the A64 bitmask-immediate
+ // encoding rules.
+ if (try payload_part_vi.?.defReg(isel)) |payload_part_ra| try isel.emit(switch (payload_size) {
+ else => unreachable,
+ 1...4 => .orr(payload_part_ra.w(), .wzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ 5...8 => .orr(payload_part_ra.x(), .xzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ });
+ }
+ var error_set_part_it = error_union_vi.value.field(error_union_ty, error_set_offset, error_set_size);
+ const error_set_part_vi = try error_set_part_it.only(isel);
+ try error_set_part_vi.?.move(isel, ty_op.operand);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.struct_field_ptr`: compute a pointer to a struct field by adding
+ // the field offset to the base pointer. A zero offset degenerates to a
+ // plain move; otherwise the offset is split into a low-12-bit and a
+ // shifted high-12-bit add immediate.
+ .struct_field_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
+ switch (codegen.fieldOffset(
+ isel.air.typeOf(extra.struct_operand, ip),
+ ty_pl.ty.toType(),
+ extra.field_index,
+ zcu,
+ )) {
+ 0 => try dst_vi.value.move(isel, extra.struct_operand),
+ else => |field_offset| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(extra.struct_operand);
+ const src_mat = try src_vi.matReg(isel);
+ const lo12: u12 = @truncate(field_offset >> 0);
+ const hi12: u12 = @intCast(field_offset >> 12);
+ // NOTE(review): the hi12 add reads dst when lo12 also fires,
+ // which only chains correctly if emission is in reverse
+ // execution order (lo12 add runs first) — confirm.
+ if (hi12 > 0) try isel.emit(.add(
+ dst_ra.x(),
+ if (lo12 > 0) dst_ra.x() else src_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(dst_ra.x(), src_mat.ra.x(), .{ .immediate = lo12 }));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // Indexed shorthand forms of `.struct_field_ptr` for fields 0..3;
+ // identical codegen with the field index baked into the AIR tag.
+ .struct_field_ptr_index_0,
+ .struct_field_ptr_index_1,
+ .struct_field_ptr_index_2,
+ .struct_field_ptr_index_3,
+ => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ switch (codegen.fieldOffset(
+ isel.air.typeOf(ty_op.operand, ip),
+ ty_op.ty.toType(),
+ switch (air_tag) {
+ else => unreachable,
+ .struct_field_ptr_index_0 => 0,
+ .struct_field_ptr_index_1 => 1,
+ .struct_field_ptr_index_2 => 2,
+ .struct_field_ptr_index_3 => 3,
+ },
+ zcu,
+ )) {
+ 0 => try dst_vi.value.move(isel, ty_op.operand),
+ else => |field_offset| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const lo12: u12 = @truncate(field_offset >> 0);
+ const hi12: u12 = @intCast(field_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ dst_ra.x(),
+ if (lo12 > 0) dst_ra.x() else src_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.add(dst_ra.x(), src_mat.ra.x(), .{ .immediate = lo12 }));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.struct_field_val`: extract a field's value from an aggregate.
+ // Packed-layout fields are not implemented. For auto/extern layouts the
+ // field is located by byte offset and copied part by part.
+ .struct_field_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |field_vi| {
+ defer field_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.StructField, ty_pl.payload).data;
+ const agg_ty = isel.air.typeOf(extra.struct_operand, ip);
+ const field_ty = ty_pl.ty.toType();
+ // Field position in bits plus whether the container is packed.
+ const field_bit_offset, const field_bit_size, const is_packed = switch (agg_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => .{
+ 8 * agg_ty.structFieldOffset(extra.field_index, zcu),
+ 8 * field_ty.abiSize(zcu),
+ false,
+ },
+ .@"packed" => .{
+ if (zcu.typeToPackedStruct(agg_ty)) |loaded_struct|
+ zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index)
+ else
+ 0,
+ field_ty.bitSize(zcu),
+ true,
+ },
+ };
+ if (is_packed) return isel.fail("packed field of {f}", .{
+ isel.fmtType(agg_ty),
+ });
+
+ // Walk the aggregate parts overlapping the field, and for each,
+ // route the corresponding subparts of the result out of the
+ // matching subfields of the aggregate.
+ const agg_vi = try isel.use(extra.struct_operand);
+ var agg_part_it = agg_vi.field(agg_ty, @divExact(field_bit_offset, 8), @divExact(field_bit_size, 8));
+ while (try agg_part_it.next(isel)) |agg_part| {
+ var field_part_it = field_vi.value.field(ty_pl.ty.toType(), agg_part.offset, agg_part.vi.size(isel));
+ const field_part_vi = try field_part_it.only(isel);
+ // Already aliased to the same value: nothing to copy.
+ if (field_part_vi.? == agg_part.vi) continue;
+ var field_subpart_it = field_part_vi.?.parts(isel);
+ const field_part_offset = if (field_subpart_it.only()) |field_subpart_vi|
+ field_subpart_vi.get(isel).offset_from_parent
+ else
+ 0;
+ while (field_subpart_it.next()) |field_subpart_vi| {
+ const field_subpart_ra = try field_subpart_vi.defReg(isel) orelse continue;
+ const field_subpart_offset, const field_subpart_size = field_subpart_vi.position(isel);
+ var agg_subpart_it = agg_part.vi.field(
+ field_ty,
+ agg_part.offset + field_subpart_offset - field_part_offset,
+ field_subpart_size,
+ );
+ const agg_subpart_vi = try agg_subpart_it.only(isel);
+ try agg_subpart_vi.?.liveOut(isel, field_subpart_ra);
+ }
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.slice`: assemble a slice value from a pointer and a length.
+ // Slice layout: pointer at bytes 0..8, length at bytes 8..16.
+ .slice => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |slice_vi| {
+ defer slice_vi.value.deref(isel);
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ var ptr_part_it = slice_vi.value.field(ty_pl.ty.toType(), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ try ptr_part_vi.?.move(isel, bin_op.lhs);
+ var len_part_it = slice_vi.value.field(ty_pl.ty.toType(), 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ try len_part_vi.?.move(isel, bin_op.rhs);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.slice_len`: extract the length word (bytes 8..16) of a slice.
+ .slice_len => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |len_vi| {
+ defer len_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const slice_vi = try isel.use(ty_op.operand);
+ var len_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ try len_vi.value.copy(isel, ty_op.ty.toType(), len_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.slice_ptr`: extract the pointer word (bytes 0..8) of a slice.
+ .slice_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| {
+ defer ptr_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ const slice_vi = try isel.use(ty_op.operand);
+ var ptr_part_it = slice_vi.field(isel.air.typeOf(ty_op.operand, ip), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ try ptr_vi.value.copy(isel, ty_op.ty.toType(), ptr_part_vi.?);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR `.array_elem_val`: index an array value. Three strategies:
+ // comptime-known index into a register-resident array (direct part
+ // copy); power-of-two element size (single scaled indexed ldr from the
+ // array's address); and a general load via computed element pointer.
+ .array_elem_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
+ defer elem_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const array_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const elem_ty = array_ty.childType(zcu);
+ const elem_size = elem_ty.abiSize(zcu);
+ // Comptime index + small array: just copy the overlapping part.
+ if (elem_size <= 16 and array_ty.arrayLenIncludingSentinel(zcu) <= Value.max_parts) if (bin_op.rhs.toInterned()) |index_val| {
+ const elem_offset = elem_size * Constant.fromInterned(index_val).toUnsignedInt(zcu);
+ const array_vi = try isel.use(bin_op.lhs);
+ var elem_part_it = array_vi.field(array_ty, elem_offset, elem_size);
+ const elem_part_vi = try elem_part_it.only(isel);
+ try elem_vi.value.copy(isel, elem_ty, elem_part_vi.?);
+ break :unused;
+ };
+ switch (elem_size) {
+ 0 => unreachable,
+ // Scaled register-indexed load; the load register class and
+ // extension depend on element size, signedness, and whether
+ // the element is a vector value.
+ 1, 2, 4, 8 => {
+ const elem_ra = try elem_vi.value.defReg(isel) orelse break :unused;
+ const array_ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(array_ptr_ra);
+ const index_vi = try isel.use(bin_op.rhs);
+ const index_mat = try index_vi.matReg(isel);
+ try isel.emit(switch (elem_size) {
+ else => unreachable,
+ 1 => if (elem_vi.value.isVector(isel)) .ldr(elem_ra.b(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsb(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ .unsigned => .ldrb(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ },
+ 2 => if (elem_vi.value.isVector(isel)) .ldr(elem_ra.h(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsh(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ .unsigned => .ldrh(elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ },
+ 4 => .ldr(if (elem_vi.value.isVector(isel)) elem_ra.s() else elem_ra.w(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 2 },
+ } }),
+ 8 => .ldr(if (elem_vi.value.isVector(isel)) elem_ra.d() else elem_ra.x(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 3 },
+ } }),
+ // NOTE(review): this 16-byte arm is unreachable here because
+ // the outer switch only covers 1, 2, 4, 8 — confirm whether
+ // size 16 was meant to be handled on this path.
+ 16 => .ldr(elem_ra.q(), .{ .extended_register = .{
+ .base = array_ptr_ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 4 },
+ } }),
+ });
+ try index_mat.finish(isel);
+ // Materialize the array's base address into array_ptr_ra
+ // (emitted after, executed before, the load — reverse order).
+ const array_vi = try isel.use(bin_op.lhs);
+ try array_vi.address(isel, 0, array_ptr_ra);
+ },
+ // General path: compute &array[index] and load through it.
+ else => {
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ if (!try elem_vi.value.load(isel, elem_ty, ptr_ra, .{})) break :unused;
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(ptr_ra, ptr_ra, .add, elem_size, index_vi);
+ const array_vi = try isel.use(bin_op.lhs);
+ try array_vi.address(isel, 0, ptr_ra);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .slice_elem_val: load slice[index]. For power-of-two element sizes the
+ // value is loaded directly with a single register-offset load whose index is
+ // scaled by log2(elem_size); otherwise the element address is computed with
+ // elemPtr and the value is loaded generically via Value.load.
+ .slice_elem_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
+ defer elem_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const slice_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const ptr_info = slice_ty.ptrInfo(zcu);
+ const elem_size = elem_vi.value.size(isel);
+ const elem_is_vector = elem_vi.value.isVector(isel);
+ // The single-load fast path: 16-byte loads only exist for vector regs.
+ if (switch (elem_size) {
+ 0 => unreachable,
+ 1, 2, 4, 8 => true,
+ 16 => elem_is_vector,
+ else => false,
+ }) {
+ const elem_ra = try elem_vi.value.defReg(isel) orelse break :unused;
+ const slice_vi = try isel.use(bin_op.lhs);
+ const index_vi = try isel.use(bin_op.rhs);
+ // Field at offset 0, size 8: the pointer half of the slice.
+ var ptr_part_it = slice_vi.field(slice_ty, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const base_mat = try ptr_part_vi.?.matReg(isel);
+ const index_mat = try index_vi.matReg(isel);
+ // The index register holds an *element* index, so every load scales it
+ // by log2(elem_size) via the extended-register lsl amount.
+ try isel.emit(switch (elem_size) {
+ else => unreachable,
+ 1 => if (elem_is_vector) .ldr(elem_ra.b(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ .unsigned => .ldrb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ },
+ // FIX: was `.lsl = 0`, which treated the element index as a byte
+ // offset; 2-byte elements must scale by 1, matching the ldrsh/ldrh
+ // arms below and the equivalent array-element prong.
+ 2 => if (elem_is_vector) .ldr(elem_ra.h(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }) else switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ .unsigned => .ldrh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ },
+ 4 => .ldr(if (elem_is_vector) elem_ra.s() else elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 2 },
+ } }),
+ 8 => .ldr(if (elem_is_vector) elem_ra.d() else elem_ra.x(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 3 },
+ } }),
+ 16 => .ldr(elem_ra.q(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 4 },
+ } }),
+ });
+ try index_mat.finish(isel);
+ try base_mat.finish(isel);
+ break :unused;
+ } else {
+ // Generic path: materialize &slice.ptr[index], then load through it,
+ // honoring volatility from the slice's pointer info.
+ const elem_ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(elem_ptr_ra);
+ if (!try elem_vi.value.load(isel, slice_ty.elemType2(zcu), elem_ptr_ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ })) break :unused;
+ const slice_vi = try isel.use(bin_op.lhs);
+ var ptr_part_it = slice_vi.field(slice_ty, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const ptr_part_mat = try ptr_part_vi.?.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, ptr_part_mat.ra, .add, elem_size, index_vi);
+ try ptr_part_mat.finish(isel);
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .slice_elem_ptr: compute &slice[index] into the destination register.
+ // elemPtr performs base + index * elem_size.
+ .slice_elem_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_ptr_vi| unused: {
+ defer elem_ptr_vi.value.deref(isel);
+ const elem_ptr_ra = try elem_ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ // The element size comes from the child of the *result* pointer type.
+ const elem_size = ty_pl.ty.toType().childType(zcu).abiSize(zcu);
+
+ const slice_vi = try isel.use(bin_op.lhs);
+ // Field at offset 0, size 8: the pointer half of the slice.
+ var ptr_part_it = slice_vi.field(isel.air.typeOf(bin_op.lhs, ip), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ const ptr_part_mat = try ptr_part_vi.?.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, ptr_part_mat.ra, .add, elem_size, index_vi);
+ try ptr_part_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .ptr_elem_val: load ptr[index] through a many-item pointer. Integer
+ // element sizes 1/2/4/8 use a single register-offset load with the index
+ // scaled by log2(elem_size); everything else computes the element address
+ // and loads generically via Value.load.
+ .ptr_elem_val => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_vi| unused: {
+ defer elem_vi.value.deref(isel);
+
+ const bin_op = air.data(air.inst_index).bin_op;
+ const ptr_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ const elem_size = elem_vi.value.size(isel);
+ switch (elem_size) {
+ 0 => unreachable,
+ 1, 2, 4, 8 => {
+ const elem_ra = try elem_vi.value.defReg(isel) orelse break :unused;
+ const base_vi = try isel.use(bin_op.lhs);
+ const index_vi = try isel.use(bin_op.rhs);
+ const base_mat = try base_vi.matReg(isel);
+ const index_mat = try index_vi.matReg(isel);
+ // Sub-word loads pick sign or zero extension from the element's
+ // signedness; 1-byte loads need no index scaling.
+ try isel.emit(switch (elem_size) {
+ else => unreachable,
+ 1 => switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ .unsigned => .ldrb(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 0 },
+ } }),
+ },
+ 2 => switch (elem_vi.value.signedness(isel)) {
+ .signed => .ldrsh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ .unsigned => .ldrh(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 1 },
+ } }),
+ },
+ 4 => .ldr(elem_ra.w(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 2 },
+ } }),
+ 8 => .ldr(elem_ra.x(), .{ .extended_register = .{
+ .base = base_mat.ra.x(),
+ .index = index_mat.ra.x(),
+ .extend = .{ .lsl = 3 },
+ } }),
+ });
+ try index_mat.finish(isel);
+ try base_mat.finish(isel);
+ },
+ else => {
+ // Generic path: compute &base[index], then Value.load through it,
+ // honoring volatility from the pointer type.
+ const elem_ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(elem_ptr_ra);
+ if (!try elem_vi.value.load(isel, ptr_ty.elemType2(zcu), elem_ptr_ra, .{
+ .@"volatile" = ptr_info.flags.is_volatile,
+ })) break :unused;
+ const base_vi = try isel.use(bin_op.lhs);
+ const base_mat = try base_vi.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, base_mat.ra, .add, elem_size, index_vi);
+ try base_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .ptr_elem_ptr: compute &base[index] for a non-slice pointer; same as
+ // .slice_elem_ptr but the base pointer is the operand itself rather than
+ // the pointer field of a slice.
+ .ptr_elem_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |elem_ptr_vi| unused: {
+ defer elem_ptr_vi.value.deref(isel);
+ const elem_ptr_ra = try elem_ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const bin_op = isel.air.extraData(Air.Bin, ty_pl.payload).data;
+ // Element size from the child of the result pointer type.
+ const elem_size = ty_pl.ty.toType().childType(zcu).abiSize(zcu);
+
+ const base_vi = try isel.use(bin_op.lhs);
+ const base_mat = try base_vi.matReg(isel);
+ const index_vi = try isel.use(bin_op.rhs);
+ try isel.elemPtr(elem_ptr_ra, base_mat.ra, .add, elem_size, index_vi);
+ try base_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .array_to_slice: build the { ptr, len } pair. The pointer half simply
+ // takes over the operand; the length half is the comptime-known array length
+ // of the operand pointer's child type, materialized only if anything reads it.
+ .array_to_slice => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |slice_vi| {
+ defer slice_vi.value.deref(isel);
+ const ty_op = air.data(air.inst_index).ty_op;
+ // Offset 0, size 8: pointer part of the resulting slice.
+ var ptr_part_it = slice_vi.value.field(ty_op.ty.toType(), 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ try ptr_part_vi.?.move(isel, ty_op.operand);
+ // Offset 8, size 8: length part.
+ var len_part_it = slice_vi.value.field(ty_op.ty.toType(), 8, 8);
+ const len_part_vi = try len_part_it.only(isel);
+ // defReg returns null when the length is never used; skip the mov then.
+ if (try len_part_vi.?.defReg(isel)) |len_ra| try isel.movImmediate(
+ len_ra.x(),
+ isel.air.typeOf(ty_op.operand, ip).childType(zcu).arrayLen(zcu),
+ );
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .int_from_float(_optimized): float -> integer conversion.
+ // Up to 64 bits on both sides: a single fcvtzs/fcvtzu, with f16 first widened
+ // to f32 via fcvt when the CPU lacks the fullfp16 feature.
+ // Anything wider: a compiler_rt __fix*/__fixuns* libcall.
+ // NOTE: instructions are emitted in reverse execution order here — observe
+ // that the widening fcvt is emitted *after* the fcvtzs/fcvtzu that consumes
+ // its result, and that the call sequence emits return handling, then bl,
+ // then parameter setup.
+ .int_from_float, .int_from_float_optimized => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ if (!dst_ty.isAbiInt(zcu)) return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ const dst_int_info = dst_ty.intInfo(zcu);
+ const src_bits = src_ty.floatBits(isel.target);
+ switch (@max(dst_int_info.bits, src_bits)) {
+ 0 => unreachable,
+ 1...64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ // f16 needs a helper fcvt to f32 unless fullfp16 is available.
+ const need_fcvt = switch (src_bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ // Scratch vector reg holds the widened f32 source when converting.
+ const src_ra = if (need_fcvt) try isel.allocVecReg() else src_mat.ra;
+ defer if (need_fcvt) isel.freeReg(src_ra);
+ const dst_reg = switch (dst_int_info.bits) {
+ else => unreachable,
+ 1...32 => dst_ra.w(),
+ 33...64 => dst_ra.x(),
+ };
+ const src_reg = switch (src_bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) src_ra.s() else src_ra.h(),
+ 32 => src_ra.s(),
+ 64 => src_ra.d(),
+ };
+ try isel.emit(switch (dst_int_info.signedness) {
+ .signed => .fcvtzs(dst_reg, src_reg),
+ .unsigned => .fcvtzu(dst_reg, src_reg),
+ });
+ // Emitted after, executes before: widen the h source into src_reg.
+ if (need_fcvt) try isel.emit(.fcvt(src_reg, src_mat.ra.h()));
+ try src_mat.finish(isel);
+ },
+ 65...128 => {
+ // Libcall path: results land in r0 (or r1:r0 for 128-bit ints).
+ try call.prepareReturn(isel);
+ switch (dst_int_info.bits) {
+ else => unreachable,
+ 1...64 => try call.returnLiveIn(isel, dst_vi.value, .r0),
+ 65...128 => {
+ var dst_hi64_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi64_vi = try dst_hi64_it.only(isel);
+ try call.returnLiveIn(isel, dst_hi64_vi.?, .r1);
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ try call.returnLiveIn(isel, dst_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ // Select the compiler_rt symbol: __fix[uns]{h,s,d,x,t}f{si,di,ti}
+ // keyed on source float width and destination int width/signedness.
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (dst_int_info.bits) {
+ else => unreachable,
+ 1...32 => switch (dst_int_info.signedness) {
+ .signed => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixhfsi",
+ 32 => "__fixsfsi",
+ 64 => "__fixdfsi",
+ 80 => "__fixxfsi",
+ 128 => "__fixtfsi",
+ },
+ .unsigned => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixunshfsi",
+ 32 => "__fixunssfsi",
+ 64 => "__fixunsdfsi",
+ 80 => "__fixunsxfsi",
+ 128 => "__fixunstfsi",
+ },
+ },
+ 33...64 => switch (dst_int_info.signedness) {
+ .signed => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixhfdi",
+ 32 => "__fixsfdi",
+ 64 => "__fixdfdi",
+ 80 => "__fixxfdi",
+ 128 => "__fixtfdi",
+ },
+ .unsigned => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixunshfdi",
+ 32 => "__fixunssfdi",
+ 64 => "__fixunsdfdi",
+ 80 => "__fixunsxfdi",
+ 128 => "__fixunstfdi",
+ },
+ },
+ 65...128 => switch (dst_int_info.signedness) {
+ .signed => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixhfti",
+ 32 => "__fixsfti",
+ 64 => "__fixdfti",
+ 80 => "__fixxfti",
+ 128 => "__fixtfti",
+ },
+ .unsigned => switch (src_bits) {
+ else => unreachable,
+ 16 => "__fixunshfti",
+ 32 => "__fixunssfti",
+ 64 => "__fixunsdfti",
+ 80 => "__fixunsxfti",
+ 128 => "__fixunstfti",
+ },
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ // bl target is patched via the reloc recorded above.
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ switch (src_bits) {
+ else => unreachable,
+ // f16/f32/f64/f128 are passed in v0; f80 goes in r1:r0 as a GPR pair.
+ 16, 32, 64, 128 => try call.paramLiveOut(isel, src_vi, .v0),
+ 80 => {
+ var src_hi16_it = src_vi.field(src_ty, 8, 8);
+ const src_hi16_vi = try src_hi16_it.only(isel);
+ try call.paramLiveOut(isel, src_hi16_vi.?, .r1);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .float_from_int: integer -> float conversion, the mirror image of
+ // .int_from_float above. Up to 64 bits on both sides: scvtf/ucvtf, with the
+ // result narrowed f32 -> f16 by fcvt when fullfp16 is unavailable. Wider:
+ // a compiler_rt __float*/__floatun* libcall. Emission order is reversed
+ // relative to execution order (see .int_from_float).
+ .float_from_int => |air_tag| {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+
+ const ty_op = air.data(air.inst_index).ty_op;
+ const dst_ty = ty_op.ty.toType();
+ const src_ty = isel.air.typeOf(ty_op.operand, ip);
+ const dst_bits = dst_ty.floatBits(isel.target);
+ if (!src_ty.isAbiInt(zcu)) return isel.fail("bad {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) });
+ const src_int_info = src_ty.intInfo(zcu);
+ switch (@max(dst_bits, src_int_info.bits)) {
+ 0 => unreachable,
+ 1...64 => {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (dst_bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ // Emitted first, executes last: narrow the f32 result down to f16.
+ if (need_fcvt) try isel.emit(.fcvt(dst_ra.h(), dst_ra.s()));
+ const src_vi = try isel.use(ty_op.operand);
+ const src_mat = try src_vi.matReg(isel);
+ const dst_reg = switch (dst_bits) {
+ else => unreachable,
+ 16 => if (need_fcvt) dst_ra.s() else dst_ra.h(),
+ 32 => dst_ra.s(),
+ 64 => dst_ra.d(),
+ };
+ const src_reg = switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => src_mat.ra.w(),
+ 33...64 => src_mat.ra.x(),
+ };
+ try isel.emit(switch (src_int_info.signedness) {
+ .signed => .scvtf(dst_reg, src_reg),
+ .unsigned => .ucvtf(dst_reg, src_reg),
+ });
+ try src_mat.finish(isel);
+ },
+ 65...128 => {
+ // Libcall path: f16/f32/f64/f128 results return in v0; f80 in r1:r0.
+ try call.prepareReturn(isel);
+ switch (dst_bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, dst_vi.value, .v0),
+ 80 => {
+ var dst_hi16_it = dst_vi.value.field(dst_ty, 8, 8);
+ const dst_hi16_vi = try dst_hi16_it.only(isel);
+ try call.returnLiveIn(isel, dst_hi16_vi.?, .r1);
+ var dst_lo64_it = dst_vi.value.field(dst_ty, 0, 8);
+ const dst_lo64_vi = try dst_lo64_it.only(isel);
+ try call.returnLiveIn(isel, dst_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ // __float[un]{si,di,ti}{h,s,d,x,t}f keyed on source int width and
+ // signedness, and destination float width.
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (src_int_info.bits) {
+ else => unreachable,
+ 1...32 => switch (src_int_info.signedness) {
+ .signed => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatsihf",
+ 32 => "__floatsisf",
+ 64 => "__floatsidf",
+ 80 => "__floatsixf",
+ 128 => "__floatsitf",
+ },
+ .unsigned => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatunsihf",
+ 32 => "__floatunsisf",
+ 64 => "__floatunsidf",
+ 80 => "__floatunsixf",
+ 128 => "__floatunsitf",
+ },
+ },
+ 33...64 => switch (src_int_info.signedness) {
+ .signed => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatdihf",
+ 32 => "__floatdisf",
+ 64 => "__floatdidf",
+ 80 => "__floatdixf",
+ 128 => "__floatditf",
+ },
+ .unsigned => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatundihf",
+ 32 => "__floatundisf",
+ 64 => "__floatundidf",
+ 80 => "__floatundixf",
+ 128 => "__floatunditf",
+ },
+ },
+ 65...128 => switch (src_int_info.signedness) {
+ .signed => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floattihf",
+ 32 => "__floattisf",
+ 64 => "__floattidf",
+ 80 => "__floattixf",
+ 128 => "__floattitf",
+ },
+ .unsigned => switch (dst_bits) {
+ else => unreachable,
+ 16 => "__floatuntihf",
+ 32 => "__floatuntisf",
+ 64 => "__floatuntidf",
+ 80 => "__floatuntixf",
+ 128 => "__floatuntitf",
+ },
+ },
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const src_vi = try isel.use(ty_op.operand);
+ switch (src_int_info.bits) {
+ else => unreachable,
+ // Integer argument goes in r0, or r1:r0 for 128-bit ints.
+ 1...64 => try call.paramLiveOut(isel, src_vi, .r0),
+ 65...128 => {
+ var src_hi64_it = src_vi.field(src_ty, 8, 8);
+ const src_hi64_vi = try src_hi64_it.only(isel);
+ try call.paramLiveOut(isel, src_hi64_vi.?, .r1);
+ var src_lo64_it = src_vi.field(src_ty, 0, 8);
+ const src_lo64_vi = try src_lo64_it.only(isel);
+ try call.paramLiveOut(isel, src_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ else => return isel.fail("too big {s} {f} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty), isel.fmtType(src_ty) }),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .memset: fill a destination pointer/slice with a value.
+ // Strategy: if the fill value has a repeated-byte representation (or is a
+ // single byte), break out of the labeled block and call libc memset below.
+ // Otherwise, for 2/4/8-byte elements, emit an inline post-indexed store loop
+ // and finish this prong without falling through to the call.
+ .memset => |air_tag| {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const dst_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const dst_info = dst_ty.ptrInfo(zcu);
+ const fill_byte: union(enum) { constant: u8, value: Air.Inst.Ref } = fill_byte: {
+ if (bin_op.rhs.toInterned()) |fill_val|
+ if (try isel.hasRepeatedByteRepr(.fromInterned(fill_val))) |fill_byte|
+ break :fill_byte .{ .constant = fill_byte };
+ switch (dst_ty.elemType2(zcu).abiSize(zcu)) {
+ 0 => unreachable,
+ 1 => break :fill_byte .{ .value = bin_op.rhs },
+ 2, 4, 8 => |size| {
+ const dst_vi = try isel.use(bin_op.lhs);
+ const ptr_ra = try isel.allocIntReg();
+ const fill_vi = try isel.use(bin_op.rhs);
+ const fill_mat = try fill_vi.matReg(isel);
+ // Length register: for a single-item array pointer it is a fresh
+ // scratch reg loaded with the comptime length; for a slice it is
+ // the materialized len field.
+ const len_mat: Value.Materialize = len_mat: switch (dst_info.flags.size) {
+ .one => .{ .vi = undefined, .ra = try isel.allocIntReg() },
+ .many => unreachable,
+ .slice => {
+ var dst_len_it = dst_vi.field(dst_ty, 8, 8);
+ const dst_len_vi = try dst_len_it.only(isel);
+ break :len_mat try dst_len_vi.?.matReg(isel);
+ },
+ .c => unreachable,
+ };
+
+ // Reserve a slot for the loop's backward branch; it is back-patched
+ // with cbnz once the loop body length is known.
+ const skip_label = isel.instructions.items.len;
+ _ = try isel.instructions.addOne(gpa);
+ try isel.emit(.sub(len_mat.ra.x(), len_mat.ra.x(), .{ .immediate = 1 }));
+ // Post-indexed store advances ptr_ra by the element size each trip.
+ try isel.emit(switch (size) {
+ else => unreachable,
+ 2 => .strh(fill_mat.ra.w(), .{ .post_index = .{ .base = ptr_ra.x(), .index = 2 } }),
+ 4 => .str(fill_mat.ra.w(), .{ .post_index = .{ .base = ptr_ra.x(), .index = 4 } }),
+ 8 => .str(fill_mat.ra.x(), .{ .post_index = .{ .base = ptr_ra.x(), .index = 8 } }),
+ });
+ isel.instructions.items[skip_label] = .cbnz(
+ len_mat.ra.x(),
+ -@as(i21, @intCast((isel.instructions.items.len - 1 - skip_label) << 2)),
+ );
+ switch (dst_info.flags.size) {
+ .one => {
+ // Single-item array pointer: length is comptime-known and nonzero,
+ // so no zero-length guard is needed.
+ const len_imm = ZigType.fromInterned(dst_info.child).arrayLen(zcu);
+ assert(len_imm > 0);
+ try isel.movImmediate(len_mat.ra.x(), len_imm);
+ isel.freeReg(len_mat.ra);
+ try fill_mat.finish(isel);
+ // Freed first so liveOut can claim the same register for the base.
+ isel.freeReg(ptr_ra);
+ try dst_vi.liveOut(isel, ptr_ra);
+ },
+ .many => unreachable,
+ .slice => {
+ // Runtime length may be zero: cbz skips the whole loop.
+ try isel.emit(.cbz(
+ len_mat.ra.x(),
+ @intCast((isel.instructions.items.len + 1 - skip_label) << 2),
+ ));
+ try len_mat.finish(isel);
+ try fill_mat.finish(isel);
+ isel.freeReg(ptr_ra);
+ var dst_ptr_it = dst_vi.field(dst_ty, 0, 8);
+ const dst_ptr_vi = try dst_ptr_it.only(isel);
+ try dst_ptr_vi.?.liveOut(isel, ptr_ra);
+ },
+ .c => unreachable,
+ }
+
+ // Inline loop done: this prong is finished, skip the memset call.
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ break :air_tag;
+ },
+ else => return isel.fail("too big {s} {f}", .{ @tagName(air_tag), isel.fmtType(dst_ty) }),
+ }
+ };
+
+ // Libcall path: memset(dst, fill, byte_count).
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = "memset",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const dst_vi = try isel.use(bin_op.lhs);
+ switch (dst_info.flags.size) {
+ .one => {
+ // x2 = byte count (comptime for a single-item array pointer).
+ try isel.movImmediate(.x2, ZigType.fromInterned(dst_info.child).abiSize(zcu));
+ switch (fill_byte) {
+ .constant => |byte| try isel.movImmediate(.w1, byte),
+ .value => |byte| try call.paramLiveOut(isel, try isel.use(byte), .r1),
+ }
+ try call.paramLiveOut(isel, dst_vi, .r0),
+ },
+ // AIR .memcpy / .memmove: always lowered to the matching libc call; the AIR
+ // tag name doubles as the callee symbol name via @tagName.
+ .memcpy, .memmove => |air_tag| {
+ const bin_op = air.data(air.inst_index).bin_op;
+ const dst_ty = isel.air.typeOf(bin_op.lhs, ip);
+ const dst_info = dst_ty.ptrInfo(zcu);
+
+ // No return value to bind, but the call protocol is still observed.
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = @tagName(air_tag),
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ switch (dst_info.flags.size) {
+ .one => {
+ // Single-item array pointer: byte count is comptime-known.
+ const dst_vi = try isel.use(bin_op.lhs);
+ const src_vi = try isel.use(bin_op.rhs);
+ try isel.movImmediate(.x2, ZigType.fromInterned(dst_info.child).abiSize(zcu));
+ try call.paramLiveOut(isel, src_vi, .r1);
+ try call.paramLiveOut(isel, dst_vi, .r0);
+ },
+ .many => unreachable,
+ .slice => {
+ const dst_vi = try isel.use(bin_op.lhs);
+ var dst_ptr_it = dst_vi.field(dst_ty, 0, 8);
+ const dst_ptr_vi = try dst_ptr_it.only(isel);
+ var dst_len_it = dst_vi.field(dst_ty, 8, 8);
+ const dst_len_vi = try dst_len_it.only(isel);
+ const src_vi = try isel.use(bin_op.rhs);
+ // x2 = len * elem_size, computed as elemPtr relative to a zero base.
+ try isel.elemPtr(.r2, .zr, .add, ZigType.fromInterned(dst_info.child).abiSize(zcu), dst_len_vi.?);
+ try call.paramLiveOut(isel, src_vi, .r1);
+ try call.paramLiveOut(isel, dst_ptr_vi.?, .r0);
+ },
+ .c => unreachable,
+ }
+ try call.finishParams(isel);
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .atomic_load: only .unordered is accepted (guarded below), so each part
+ // can be loaded with a plain ldr-family instruction at its offset from the
+ // base pointer — no barriers or acquire variants are emitted here.
+ .atomic_load => {
+ const atomic_load = air.data(air.inst_index).atomic_load;
+ const ptr_ty = isel.air.typeOf(atomic_load.ptr, ip);
+ const ptr_info = ptr_ty.ptrInfo(zcu);
+ if (atomic_load.order != .unordered) return isel.fail("ordered atomic load", .{});
+ if (ptr_info.packed_offset.host_size > 0) return isel.fail("packed atomic load", .{});
+
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| {
+ defer dst_vi.value.deref(isel);
+ // Materialize the pointer lazily — only once some part actually needs
+ // a load (defReg returns null for unused parts).
+ var ptr_mat: ?Value.Materialize = null;
+ var dst_part_it = dst_vi.value.parts(isel);
+ while (dst_part_it.next()) |dst_part_vi| {
+ const dst_ra = try dst_part_vi.defReg(isel) orelse continue;
+ if (ptr_mat == null) {
+ const ptr_vi = try isel.use(atomic_load.ptr);
+ ptr_mat = try ptr_vi.matReg(isel);
+ }
+ try isel.emit(switch (dst_part_vi.size(isel)) {
+ else => |size| return isel.fail("bad atomic load size of {d} from {f}", .{
+ size, isel.fmtType(ptr_ty),
+ }),
+ 1 => switch (dst_part_vi.signedness(isel)) {
+ .signed => .ldrsb(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ .unsigned => .ldrb(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ },
+ 2 => switch (dst_part_vi.signedness(isel)) {
+ .signed => .ldrsh(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ .unsigned => .ldrh(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ },
+ 4 => .ldr(dst_ra.w(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ 8 => .ldr(dst_ra.x(), .{ .unsigned_offset = .{
+ .base = ptr_mat.?.ra.x(),
+ .offset = @intCast(dst_part_vi.get(isel).offset_from_parent),
+ } }),
+ });
+ }
+ if (ptr_mat) |mat| try mat.finish(isel);
+ // A discarded load may still be observable: reject volatile pointers.
+ } else if (ptr_info.flags.is_volatile) return isel.fail("volatile atomic load", .{});
+
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .aggregate_init: initialize an array, struct, or tuple by moving each
+ // operand into the matching field/element slot of the result value.
+ .aggregate_init => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |agg_vi| {
+ defer agg_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const agg_ty = ty_pl.ty.toType();
+ switch (ip.indexToKey(agg_ty.toIntern())) {
+ .array_type => |array_type| {
+ // Element operands are trailing data after the payload.
+ const elems: []const Air.Inst.Ref =
+ @ptrCast(isel.air.extra.items[ty_pl.payload..][0..@intCast(array_type.len)]);
+ var elem_offset: u64 = 0;
+ const elem_size = ZigType.fromInterned(array_type.child).abiSize(zcu);
+ for (elems) |elem| {
+ var agg_part_it = agg_vi.value.field(agg_ty, elem_offset, elem_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, elem);
+ elem_offset += elem_size;
+ }
+ // The sentinel, if any, occupies one extra element slot at the end.
+ switch (array_type.sentinel) {
+ .none => {},
+ else => |sentinel| {
+ var agg_part_it = agg_vi.value.field(agg_ty, elem_offset, elem_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ // NOTE(review): `.fromIntern` here vs `.fromInterned` used
+ // elsewhere in this file — confirm the Air.Inst.Ref API name.
+ try agg_part_vi.?.move(isel, .fromIntern(sentinel));
+ },
+ }
+ },
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(agg_ty.toIntern());
+ const elems: []const Air.Inst.Ref =
+ @ptrCast(isel.air.extra.items[ty_pl.payload..][0..loaded_struct.field_types.len]);
+ var field_offset: u64 = 0;
+ // Walk fields in runtime memory order, advancing the offset past
+ // alignment padding before each field.
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ field_offset = field_ty.structFieldAlignment(
+ loaded_struct.fieldAlign(ip, field_index),
+ loaded_struct.layout,
+ zcu,
+ ).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ var agg_part_it = agg_vi.value.field(agg_ty, field_offset, field_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, elems[field_index]);
+ field_offset += field_size;
+ }
+ assert(field_offset == agg_vi.value.size(isel));
+ },
+ .tuple_type => |tuple_type| {
+ const elems: []const Air.Inst.Ref =
+ @ptrCast(isel.air.extra.items[ty_pl.payload..][0..tuple_type.types.len]);
+ var field_offset: u64 = 0;
+ for (
+ tuple_type.types.get(ip),
+ tuple_type.values.get(ip),
+ elems,
+ ) |field_ty_index, field_val, elem| {
+ // Comptime-known tuple fields have a value and no runtime slot.
+ if (field_val != .none) continue;
+ const field_ty: ZigType = .fromInterned(field_ty_index);
+ field_offset = field_ty.abiAlignment(zcu).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ var agg_part_it = agg_vi.value.field(agg_ty, field_offset, field_size);
+ const agg_part_vi = try agg_part_it.only(isel);
+ try agg_part_vi.?.move(isel, elem);
+ field_offset += field_size;
+ }
+ assert(field_offset == agg_vi.value.size(isel));
+ },
+ else => return isel.fail("aggregate init {f}", .{isel.fmtType(agg_ty)}),
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .union_init: only extern unions are supported; the initializer is
+ // copied into the union's memory with a memcpy libcall sized by the
+ // initializer (not the full union), leaving the remaining bytes untouched.
+ .union_init => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |un_vi| unused: {
+ defer un_vi.value.deref(isel);
+
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.UnionInit, ty_pl.payload).data;
+ const un_ty = ty_pl.ty.toType();
+ if (un_ty.containerLayout(zcu) != .@"extern") return isel.fail("bad union init {f}", .{isel.fmtType(un_ty)});
+
+ // Give the result an address to copy into; null when the result is unused.
+ try un_vi.value.defAddr(isel, un_ty, null, comptime &.initFill(.free)) orelse break :unused;
+
+ try call.prepareReturn(isel);
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = "memcpy",
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const init_vi = try isel.use(extra.init);
+ // memcpy(un, &init, sizeof(init))
+ try isel.movImmediate(.x2, init_vi.size(isel));
+ try call.paramAddress(isel, init_vi, .r1);
+ try call.paramAddress(isel, un_vi.value, .r0);
+ try call.finishParams(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .prefetch: lower to PRFM. The write+instruction combination has no
+ // encoding on AArch64, so it is silently skipped. For slices, the prefetch
+ // uses the pointer half. Locality maps: 3 (highest) -> L1 keep, 2 -> L2,
+ // 1 -> L3, 0 (no locality) -> L1 streaming.
+ .prefetch => {
+ const prefetch = air.data(air.inst_index).prefetch;
+ if (!(prefetch.rw == .write and prefetch.cache == .instruction)) {
+ const maybe_slice_ty = isel.air.typeOf(prefetch.ptr, ip);
+ const maybe_slice_vi = try isel.use(prefetch.ptr);
+ const ptr_vi = if (maybe_slice_ty.isSlice(zcu)) ptr_vi: {
+ var ptr_part_it = maybe_slice_vi.field(maybe_slice_ty, 0, 8);
+ const ptr_part_vi = try ptr_part_it.only(isel);
+ break :ptr_vi ptr_part_vi.?;
+ } else maybe_slice_vi;
+ const ptr_mat = try ptr_vi.matReg(isel);
+ try isel.emit(.prfm(.{
+ .policy = switch (prefetch.locality) {
+ 1, 2, 3 => .keep,
+ 0 => .strm,
+ },
+ .target = switch (prefetch.locality) {
+ 0, 3 => .l1,
+ 2 => .l2,
+ 1 => .l3,
+ },
+ .type = switch (prefetch.rw) {
+ .read => switch (prefetch.cache) {
+ .data => .pld,
+ .instruction => .pli,
+ },
+ .write => switch (prefetch.cache) {
+ .data => .pst,
+ // Excluded by the guard above.
+ .instruction => unreachable,
+ },
+ },
+ }, .{ .base = ptr_mat.ra.x() }));
+ try ptr_mat.finish(isel);
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ // AIR .mul_add: fused multiply-add (lhs * rhs + addend).
+ // f16/f32/f64: a single fmadd; f16 without the fullfp16 feature widens all
+ // three operands to f32 first and narrows the result back (remember emission
+ // is in reverse execution order — the fcvts emitted after fmadd run before
+ // it, and the one emitted before it runs after). f80/f128: libcall.
+ .mul_add => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |res_vi| unused: {
+ defer res_vi.value.deref(isel);
+
+ const pl_op = air.data(air.inst_index).pl_op;
+ const bin_op = isel.air.extraData(Air.Bin, pl_op.payload).data;
+ const ty = isel.air.typeOf(pl_op.operand, ip);
+ switch (ty.floatBits(isel.target)) {
+ else => unreachable,
+ 16, 32, 64 => |bits| {
+ const res_ra = try res_vi.value.defReg(isel) orelse break :unused;
+ const need_fcvt = switch (bits) {
+ else => unreachable,
+ 16 => !isel.target.cpu.has(.aarch64, .fullfp16),
+ 32, 64 => false,
+ };
+ // Emitted first, executes last: narrow the f32 result to f16.
+ if (need_fcvt) try isel.emit(.fcvt(res_ra.h(), res_ra.s()));
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const addend_vi = try isel.use(pl_op.operand);
+ const lhs_mat = try lhs_vi.matReg(isel);
+ const rhs_mat = try rhs_vi.matReg(isel);
+ const addend_mat = try addend_vi.matReg(isel);
+ // Scratch vector regs hold the widened f32 operands when converting.
+ const lhs_ra = if (need_fcvt) try isel.allocVecReg() else lhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(lhs_ra);
+ const rhs_ra = if (need_fcvt) try isel.allocVecReg() else rhs_mat.ra;
+ defer if (need_fcvt) isel.freeReg(rhs_ra);
+ const addend_ra = if (need_fcvt) try isel.allocVecReg() else addend_mat.ra;
+ defer if (need_fcvt) isel.freeReg(addend_ra);
+ // Labeled-switch trick: the f16+fcvt case re-dispatches as 32-bit.
+ try isel.emit(bits: switch (bits) {
+ else => unreachable,
+ 16 => if (need_fcvt)
+ continue :bits 32
+ else
+ .fmadd(res_ra.h(), lhs_ra.h(), rhs_ra.h(), addend_ra.h()),
+ 32 => .fmadd(res_ra.s(), lhs_ra.s(), rhs_ra.s(), addend_ra.s()),
+ 64 => .fmadd(res_ra.d(), lhs_ra.d(), rhs_ra.d(), addend_ra.d()),
+ });
+ // Emitted after fmadd, executed before it: widen each h operand.
+ if (need_fcvt) {
+ try isel.emit(.fcvt(addend_ra.s(), addend_mat.ra.h()));
+ try isel.emit(.fcvt(rhs_ra.s(), rhs_mat.ra.h()));
+ try isel.emit(.fcvt(lhs_ra.s(), lhs_mat.ra.h()));
+ }
+ try addend_mat.finish(isel);
+ try rhs_mat.finish(isel);
+ try lhs_mat.finish(isel);
+ },
+ 80, 128 => |bits| {
+ try call.prepareReturn(isel);
+ switch (bits) {
+ else => unreachable,
+ // Only 128 is reachable from this prong's other fp-register cases;
+ // f80 returns in the r1:r0 GPR pair.
+ 16, 32, 64, 128 => try call.returnLiveIn(isel, res_vi.value, .v0),
+ 80 => {
+ var res_hi16_it = res_vi.value.field(ty, 8, 8);
+ const res_hi16_vi = try res_hi16_it.only(isel);
+ try call.returnLiveIn(isel, res_hi16_vi.?, .r1);
+ var res_lo64_it = res_vi.value.field(ty, 0, 8);
+ const res_lo64_vi = try res_lo64_it.only(isel);
+ try call.returnLiveIn(isel, res_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishReturn(isel);
+
+ try call.prepareCallee(isel);
+ try isel.global_relocs.append(gpa, .{
+ .global = switch (bits) {
+ else => unreachable,
+ 16 => "__fmah",
+ 32 => "fmaf",
+ 64 => "fma",
+ 80 => "__fmax",
+ 128 => "fmaq",
+ },
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.bl(0));
+ try call.finishCallee(isel);
+
+ try call.prepareParams(isel);
+ const lhs_vi = try isel.use(bin_op.lhs);
+ const rhs_vi = try isel.use(bin_op.rhs);
+ const addend_vi = try isel.use(pl_op.operand);
+ switch (bits) {
+ else => unreachable,
+ 16, 32, 64, 128 => {
+ try call.paramLiveOut(isel, addend_vi, .v2);
+ try call.paramLiveOut(isel, rhs_vi, .v1);
+ try call.paramLiveOut(isel, lhs_vi, .v0);
+ },
+ // f80 args are passed as three GPR pairs: lhs r1:r0, rhs r3:r2,
+ // addend r5:r4.
+ 80 => {
+ var addend_hi16_it = addend_vi.field(ty, 8, 8);
+ const addend_hi16_vi = try addend_hi16_it.only(isel);
+ try call.paramLiveOut(isel, addend_hi16_vi.?, .r5);
+ var addend_lo64_it = addend_vi.field(ty, 0, 8);
+ const addend_lo64_vi = try addend_lo64_it.only(isel);
+ try call.paramLiveOut(isel, addend_lo64_vi.?, .r4);
+ var rhs_hi16_it = rhs_vi.field(ty, 8, 8);
+ const rhs_hi16_vi = try rhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, rhs_hi16_vi.?, .r3);
+ var rhs_lo64_it = rhs_vi.field(ty, 0, 8);
+ const rhs_lo64_vi = try rhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, rhs_lo64_vi.?, .r2);
+ var lhs_hi16_it = lhs_vi.field(ty, 8, 8);
+ const lhs_hi16_vi = try lhs_hi16_it.only(isel);
+ try call.paramLiveOut(isel, lhs_hi16_vi.?, .r1);
+ var lhs_lo64_it = lhs_vi.field(ty, 0, 8);
+ const lhs_lo64_vi = try lhs_lo64_it.only(isel);
+ try call.paramLiveOut(isel, lhs_lo64_vi.?, .r0);
+ },
+ }
+ try call.finishParams(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .field_parent_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |dst_vi| unused: {
+ defer dst_vi.value.deref(isel);
+ const ty_pl = air.data(air.inst_index).ty_pl;
+ const extra = isel.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
+ switch (codegen.fieldOffset(
+ ty_pl.ty.toType(),
+ isel.air.typeOf(extra.field_ptr, ip),
+ extra.field_index,
+ zcu,
+ )) {
+ 0 => try dst_vi.value.move(isel, extra.field_ptr),
+ else => |field_offset| {
+ const dst_ra = try dst_vi.value.defReg(isel) orelse break :unused;
+ const src_vi = try isel.use(extra.field_ptr);
+ const src_mat = try src_vi.matReg(isel);
+ const lo12: u12 = @truncate(field_offset >> 0);
+ const hi12: u12 = @intCast(field_offset >> 12);
+ if (hi12 > 0) try isel.emit(.sub(
+ dst_ra.x(),
+ if (lo12 > 0) dst_ra.x() else src_mat.ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0) try isel.emit(.sub(dst_ra.x(), src_mat.ra.x(), .{ .immediate = lo12 }));
+ try src_mat.finish(isel);
+ },
+ }
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .runtime_nav_ptr => {
+ if (isel.live_values.fetchRemove(air.inst_index)) |ptr_vi| unused: {
+ defer ptr_vi.value.deref(isel);
+ const ptr_ra = try ptr_vi.value.defReg(isel) orelse break :unused;
+
+ const ty_nav = air.data(air.inst_index).ty_nav;
+ if (ZigType.fromInterned(ip.getNav(ty_nav.nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
+ false => {
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = ty_nav.nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adr(ptr_ra.x(), 0));
+ },
+ true => {
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = ty_nav.nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = ty_nav.nav,
+ .reloc = .{ .label = @intCast(isel.instructions.items.len) },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ },
+ } else try isel.movImmediate(ptr_ra.x(), isel.pt.navAlignment(ty_nav.nav).forward(0xaaaaaaaaaaaaaaaa));
+ }
+ if (air.next()) |next_air_tag| continue :air_tag next_air_tag;
+ },
+ .add_safe,
+ .sub_safe,
+ .mul_safe,
+ .inferred_alloc,
+ .inferred_alloc_comptime,
+ .int_from_float_safe,
+ .int_from_float_optimized_safe,
+ .wasm_memory_size,
+ .wasm_memory_grow,
+ .work_item_id,
+ .work_group_size,
+ .work_group_id,
+ => unreachable,
+ }
+ assert(air.body_index == 0);
+}
+
/// Debug-only sanity check run after instruction selection: asserts that the
/// selector has fully unwound to a clean state — only the main block remains,
/// no loops are still active, the dominator scratch space is empty, and no
/// register is still bound to a live value. When `check_values` is set, also
/// asserts that no `Value` is still referenced. No-op without runtime safety.
pub fn verify(isel: *Select, check_values: bool) void {
    if (!std.debug.runtime_safety) return;
    assert(isel.blocks.count() == 1 and isel.blocks.keys()[0] == Select.Block.main);
    assert(isel.active_loops.items.len == 0);
    assert(isel.dom_start == 0 and isel.dom_len == 0);
    var live_reg_it = isel.live_registers.iterator();
    while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
        // Any unnamed tag value is a live value index: a register was left
        // holding a value. Dump everything before tripping the assertion.
        _ => {
            isel.dumpValues(.all);
            unreachable;
        },
        .allocating, .free => {},
    };
    if (check_values) for (isel.values.items) |value| if (value.refs != 0) {
        isel.dumpValues(.only_referenced);
        unreachable;
    };
}
+
/// Stack Frame Layout
/// +-+-----------------------------------+
/// |R| allocated stack                   |
/// +-+-----------------------------------+
/// |S| caller frame record               |    +---------------+
/// +-+-----------------------------------+ <-| entry/exit FP |
/// |R| caller frame                      |    +---------------+
/// +-+-----------------------------------+
/// |R| variable incoming stack arguments |    +---------------+
/// +-+-----------------------------------+ <-| __stack       |
/// |S| named incoming stack arguments    |    +---------------+
/// +-+-----------------------------------+ <-| entry/exit SP |
/// |S| incoming gr arguments             |   | __gr_top      |
/// +-+-----------------------------------+    +---------------+
/// |S| alignment gap                     |
/// +-+-----------------------------------+
/// |S| frame record                      |    +----------+
/// +-+-----------------------------------+ <-| FP       |
/// |S| incoming vr arguments             |   | __vr_top |
/// +-+-----------------------------------+    +----------+
/// |L| alignment gap                     |
/// +-+-----------------------------------+
/// |L| callee saved vr area              |
/// +-+-----------------------------------+
/// |L| callee saved gr area              |    +----------------------+
/// +-+-----------------------------------+ <-| prologue/epilogue SP |
/// |R| realignment gap                   |    +----------------------+
/// +-+-----------------------------------+
/// |L| locals                            |
/// +-+-----------------------------------+
/// |S| outgoing stack arguments          |    +----+
/// +-+-----------------------------------+ <-| SP |
/// |R| unallocated stack                 |    +----+
/// +-+-----------------------------------+
/// [S] Size computed by `analyze`, can be used by the body.
/// [L] Size computed by `layout`, can be used by the prologue/epilogue.
/// [R] Size unknown until runtime, can vary from one call to the next.
///
/// Constraints that led to this layout:
///  * FP to __stack/__gr_top/__vr_top must only pass through [S]
///  * SP to outgoing stack arguments/locals must only pass through [S]
///  * entry/exit SP to prologue/epilogue SP must only pass through [S/L]
///  * all save areas must be at a positive offset from prologue/epilogue SP
///  * the entry/exit SP to prologue/epilogue SP distance must
///    - be a multiple of 16 due to hardware restrictions on the value of SP
///    - conform to the limit from the first matching condition in the
///      following list due to instruction encoding limitations
///      1. callee saved gr count >= 2: multiple of 8 of at most 504 bytes
///      2. callee saved vr count >= 2: multiple of 8 of at most 504 bytes
///      3. callee saved gr count >= 1: at most 255 bytes
///      4. callee saved vr count >= 1: at most 255 bytes
///      5. variable incoming vr argument count >= 2: multiple of 16 of at most 1008 bytes
///      6. variable incoming vr argument count >= 1: at most 255 bytes
///      7. have frame record: multiple of 8 of at most 504 bytes
///
/// Plans the save areas described above, then emits the prologue followed by
/// the epilogue, returning the instruction index at which the epilogue
/// starts. NOTE(review): the epilogue is emitted in reverse execution order
/// (`.ret` is emitted before the restores it must follow), matching how the
/// rest of this backend emits; the prologue reads in forward order here —
/// presumably the emitter stitches the chunks accordingly, confirm there.
pub fn layout(
    isel: *Select,
    incoming: CallAbiIterator,
    have_va: bool,
    saved_gra_len: u7,
    saved_vra_len: u7,
    mod: *const Package.Module,
) !usize {
    const zcu = isel.pt.zcu;
    const ip = &zcu.intern_pool;
    const nav = ip.getNav(isel.nav_index);
    wip_mir_log.debug("{f}<body>:\n", .{nav.fqn.fmt(ip)});

    // Locals size rounded up to the mandatory 16-byte SP alignment, then
    // split into two 12-bit halves for add/sub immediate encoding.
    const stack_size: u24 = @intCast(InternPool.Alignment.@"16".forward(isel.stack_size));
    const stack_size_low: u12 = @truncate(stack_size >> 0);
    const stack_size_high: u12 = @truncate(stack_size >> 12);

    // Worst case entries: 10 callee-saved grs (x19..x28) + 8 callee-saved
    // vrs (v8..v15) + 8 incoming vr args + fp/lr frame record + 8 incoming
    // gr args.
    var saves_buf: [10 + 8 + 8 + 2 + 8]struct {
        class: enum { integer, vector },
        needs_restore: bool,
        register: Register,
        offset: u10,
        size: u5,
    } = undefined;
    const saves, const saves_size, const frame_record_offset = saves: {
        var saves_len: usize = 0;
        var saves_size: u10 = 0;
        var save_ra: Register.Alias = undefined;

        // callee saved gr area (x19..x28; x29 is fp)
        save_ra = .r19;
        while (save_ra != .r29) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
            if (!isel.saved_registers.contains(save_ra)) continue;
            saves_size = std.mem.alignForward(u10, saves_size, 8);
            saves_buf[saves_len] = .{
                .class = .integer,
                .needs_restore = true,
                .register = save_ra.x(),
                .offset = saves_size,
                .size = 8,
            };
            saves_len += 1;
            saves_size += 8;
        }
        // When the gr area ends up unpaired, peel off its last save so it can
        // be re-inserted later next to another 8-byte save, keeping the stp
        // pairing and 16-byte alignment constraints satisfiable.
        var deferred_gr = if (saves_size == 8 or (saves_size % 16 != 0 and saved_gra_len % 2 != 0)) gr: {
            saves_len -= 1;
            saves_size -= 8;
            break :gr saves_buf[saves_len].register;
        } else null;
        // Every deferred save must have been re-inserted below.
        defer assert(deferred_gr == null);

        // callee saved vr area (v8..v15)
        save_ra = .v8;
        while (save_ra != .v16) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
            if (!isel.saved_registers.contains(save_ra)) continue;
            saves_size = std.mem.alignForward(u10, saves_size, 8);
            saves_buf[saves_len] = .{
                .class = .vector,
                .needs_restore = true,
                .register = save_ra.d(),
                .offset = saves_size,
                .size = 8,
            };
            saves_len += 1;
            saves_size += 8;
        }
        if (deferred_gr != null and saved_gra_len % 2 == 0) {
            saves_size = std.mem.alignForward(u10, saves_size, 8);
            saves_buf[saves_len] = .{
                .class = .integer,
                .needs_restore = true,
                .register = deferred_gr.?,
                .offset = saves_size,
                .size = 8,
            };
            saves_len += 1;
            saves_size += 8;
            deferred_gr = null;
        }
        // If the save area is still misaligned with an odd vr count, widen
        // the last vr save from d (8 bytes) to q (16 bytes) to pad it out.
        if (saves_size % 16 != 0 and saved_vra_len % 2 != 0) {
            const prev_save = &saves_buf[saves_len - 1];
            switch (prev_save.class) {
                .integer => {},
                .vector => {
                    prev_save.register = prev_save.register.alias.q();
                    prev_save.size = 16;
                    saves_size += 8;
                },
            }
        }

        // incoming vr arguments (all of them when not stripping — presumably
        // for debug info — otherwise only the variadic tail)
        save_ra = if (mod.strip) incoming.nsrn else CallAbiIterator.nsrn_start;
        while (save_ra != if (have_va) CallAbiIterator.nsrn_end else incoming.nsrn) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
            saves_size = std.mem.alignForward(u10, saves_size, 16);
            saves_buf[saves_len] = .{
                .class = .vector,
                .needs_restore = false,
                .register = save_ra.q(),
                .offset = saves_size,
                .size = 16,
            };
            saves_len += 1;
            saves_size += 16;
        }

        // frame record (fp, lr) — FP will point at this pair
        saves_size = std.mem.alignForward(u10, saves_size, 16);
        const frame_record_offset = saves_size;
        saves_buf[saves_len] = .{
            .class = .integer,
            .needs_restore = true,
            .register = .fp,
            .offset = saves_size,
            .size = 8,
        };
        saves_len += 1;
        saves_size += 8;

        saves_size = std.mem.alignForward(u10, saves_size, 8);
        saves_buf[saves_len] = .{
            .class = .integer,
            .needs_restore = true,
            .register = .lr,
            .offset = saves_size,
            .size = 8,
        };
        saves_len += 1;
        saves_size += 8;

        // incoming gr arguments
        if (deferred_gr) |gr| {
            saves_size = std.mem.alignForward(u10, saves_size, 8);
            saves_buf[saves_len] = .{
                .class = .integer,
                .needs_restore = true,
                .register = gr,
                .offset = saves_size,
                .size = 8,
            };
            saves_len += 1;
            saves_size += 8;
            deferred_gr = null;
        }
        save_ra = if (mod.strip) incoming.ngrn else CallAbiIterator.ngrn_start;
        while (save_ra != if (have_va) CallAbiIterator.ngrn_end else incoming.ngrn) : (save_ra = @enumFromInt(@intFromEnum(save_ra) + 1)) {
            saves_size = std.mem.alignForward(u10, saves_size, 8);
            saves_buf[saves_len] = .{
                .class = .integer,
                .needs_restore = false,
                .register = save_ra.x(),
                .offset = saves_size,
                .size = 8,
            };
            saves_len += 1;
            saves_size += 8;
        }

        assert(InternPool.Alignment.@"16".check(saves_size));
        break :saves .{ saves_buf[0..saves_len], saves_size, frame_record_offset };
    };

    {
        wip_mir_log.debug("{f}<prologue>:", .{nav.fqn.fmt(ip)});
        var save_index: usize = 0;
        while (save_index < saves.len) {
            // Pair adjacent same-class, contiguous saves into one stp; the
            // save at offset 0 carries the SP decrement via pre-indexing.
            if (save_index + 2 <= saves.len and saves[save_index + 0].class == saves[save_index + 1].class and
                saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
            {
                try isel.emit(.stp(
                    saves[save_index + 0].register,
                    saves[save_index + 1].register,
                    switch (saves[save_index + 0].offset) {
                        0 => .{ .pre_index = .{
                            .base = .sp,
                            .index = @intCast(-@as(i11, saves_size)),
                        } },
                        else => |offset| .{ .signed_offset = .{
                            .base = .sp,
                            .offset = @intCast(offset),
                        } },
                    },
                ));
                save_index += 2;
            } else {
                try isel.emit(.str(
                    saves[save_index].register,
                    switch (saves[save_index].offset) {
                        0 => .{ .pre_index = .{
                            .base = .sp,
                            .index = @intCast(-@as(i11, saves_size)),
                        } },
                        else => |offset| .{ .unsigned_offset = .{
                            .base = .sp,
                            .offset = @intCast(offset),
                        } },
                    },
                ));
                save_index += 1;
            }
        }

        // When over-aligning, SP must be computed in a scratch register and
        // then masked; x9 is only needed when there is also a stack to
        // allocate, otherwise fp already holds the right value.
        const scratch_reg: Register = if (isel.stack_align == .@"16")
            .sp
        else if (stack_size == 0)
            .fp
        else
            .x9;
        try isel.emit(.add(.fp, .sp, .{ .immediate = frame_record_offset }));
        if (stack_size_high > 0) try isel.emit(.sub(scratch_reg, .sp, .{
            .shifted_immediate = .{ .immediate = stack_size_high, .lsl = .@"12" },
        }));
        if (stack_size_low > 0) try isel.emit(.sub(
            scratch_reg,
            if (stack_size_high > 0) scratch_reg else .sp,
            .{ .immediate = stack_size_low },
        ));
        if (isel.stack_align != .@"16") {
            // Realign: clear the low log2(align) bits with a bitmask AND.
            try isel.emit(.@"and"(.sp, scratch_reg, .{ .immediate = .{
                .N = .doubleword,
                .immr = -%isel.stack_align.toLog2Units(),
                .imms = ~isel.stack_align.toLog2Units(),
            } }));
        }
        wip_mir_log.debug("", .{});
    }

    const epilogue = isel.instructions.items.len;
    if (isel.returns) {
        // Emitted in reverse execution order: ret first, then the restores,
        // then (executed first) the SP readjustment.
        try isel.emit(.ret(.lr));
        var save_index: usize = 0;
        while (save_index < saves.len) {
            if (save_index + 2 <= saves.len and saves[save_index + 1].needs_restore and
                saves[save_index + 0].class == saves[save_index + 1].class and
                saves[save_index + 0].offset + saves[save_index + 0].size == saves[save_index + 1].offset)
            {
                try isel.emit(.ldp(
                    saves[save_index + 0].register,
                    saves[save_index + 1].register,
                    switch (saves[save_index + 0].offset) {
                        // The pair at offset 0 pops the whole save area via
                        // post-indexing.
                        0 => .{ .post_index = .{
                            .base = .sp,
                            .index = @intCast(saves_size),
                        } },
                        else => |offset| .{ .signed_offset = .{
                            .base = .sp,
                            .offset = @intCast(offset),
                        } },
                    },
                ));
                save_index += 2;
            } else if (saves[save_index].needs_restore) {
                try isel.emit(.ldr(
                    saves[save_index].register,
                    switch (saves[save_index].offset) {
                        0 => .{ .post_index = .{
                            .base = .sp,
                            .index = @intCast(saves_size),
                        } },
                        else => |offset| .{ .unsigned_offset = .{
                            .base = .sp,
                            .offset = @intCast(offset),
                        } },
                    },
                ));
                save_index += 1;
            } else save_index += 1;
        }
        // Recover the pre-restore SP: derive it from FP when SP was
        // realigned (or two adds would be needed), otherwise undo the
        // allocation with immediate adds.
        if (isel.stack_align != .@"16" or (stack_size_low > 0 and stack_size_high > 0)) {
            try isel.emit(switch (frame_record_offset) {
                0 => .add(.sp, .fp, .{ .immediate = 0 }),
                else => |offset| .sub(.sp, .fp, .{ .immediate = offset }),
            });
        } else {
            if (stack_size_high > 0) try isel.emit(.add(.sp, .sp, .{
                .shifted_immediate = .{ .immediate = stack_size_high, .lsl = .@"12" },
            }));
            if (stack_size_low > 0) try isel.emit(.add(.sp, .sp, .{
                .immediate = stack_size_low,
            }));
        }
        wip_mir_log.debug("{f}<epilogue>:\n", .{nav.fqn.fmt(ip)});
    }
    return epilogue;
}
+
/// Returns a formatter that renders the dominator set recorded for `inst`.
/// The set is a packed bitset of `len` bits starting at
/// `isel.dom.items[start]` (one bit per entry of `isel.blocks`), printed as
/// e.g. `%42 -> { %main, %7 }`.
fn fmtDom(isel: *Select, inst: Air.Inst.Index, start: u32, len: u32) struct {
    isel: *Select,
    inst: Air.Inst.Index,
    start: u32,
    len: u32,
    pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
        try writer.print("%{d} -> {{", .{@intFromEnum(data.inst)});
        var first = true;
        for (data.isel.blocks.keys()[0..data.len], 0..) |block_inst_index, dom_index| {
            // Skip blocks whose bit is clear in the packed `DomInt` words.
            if (@as(u1, @truncate(data.isel.dom.items[
                data.start + dom_index / @bitSizeOf(DomInt)
            ] >> @truncate(dom_index))) == 0) continue;
            if (first) {
                first = false;
            } else {
                try writer.writeByte(',');
            }
            switch (block_inst_index) {
                Block.main => try writer.writeAll(" %main"),
                else => try writer.print(" %{d}", .{@intFromEnum(block_inst_index)}),
            }
        }
        // Pad `{ ... }` only when at least one member was printed.
        if (!first) try writer.writeByte(' ');
        try writer.writeByte('}');
    }
} {
    return .{ .isel = isel, .inst = inst, .start = start, .len = len };
}
+
/// Returns a formatter that renders the set of instructions recorded as live
/// across the loop `loop_inst`, printed as e.g. `%13 <- { %2, %5 }`. The
/// member list is the slice of `isel.loop_live` delimited by this loop's
/// `live` offset and the next loop's.
fn fmtLoopLive(isel: *Select, loop_inst: Air.Inst.Index) struct {
    isel: *Select,
    inst: Air.Inst.Index,
    pub fn format(data: @This(), writer: *std.Io.Writer) std.Io.Writer.Error!void {
        const loops = data.isel.loops.values();
        const loop_index = data.isel.loops.getIndex(data.inst).?;
        // The live set ends where the next loop's live set begins.
        const live_insts =
            data.isel.loop_live.list.items[loops[loop_index].live..loops[loop_index + 1].live];

        try writer.print("%{d} <- {{", .{@intFromEnum(data.inst)});
        var first = true;
        for (live_insts) |live_inst| {
            if (first) {
                first = false;
            } else {
                try writer.writeByte(',');
            }
            try writer.print(" %{d}", .{@intFromEnum(live_inst)});
        }
        // Pad `{ ... }` only when at least one member was printed.
        if (!first) try writer.writeByte(' ');
        try writer.writeByte('}');
    }
} {
    return .{ .isel = isel, .inst = loop_inst };
}
+
/// Convenience wrapper producing a `ZigType` formatter bound to this
/// selection's `PerThread` context.
fn fmtType(isel: *Select, ty: ZigType) ZigType.Formatter {
    const pt = isel.pt;
    return ty.fmt(pt);
}
+
/// Convenience wrapper producing a `Constant` value formatter bound to this
/// selection's `PerThread` context.
fn fmtConstant(isel: *Select, constant: Constant) @typeInfo(@TypeOf(Constant.fmtValue)).@"fn".return_type.? {
    const pt = isel.pt;
    return constant.fmtValue(pt);
}
+
/// Logs and appends a single encoded instruction to the output buffer.
fn emit(isel: *Select, instruction: codegen.aarch64.encoding.Instruction) !void {
    wip_mir_log.debug(" | {f}", .{instruction});
    const gpa = isel.pt.zcu.gpa;
    try isel.instructions.append(gpa, instruction);
}
+
/// Appends raw literal bytes to the literal pool as 32-bit words.
/// `bytes.len` must be a multiple of 4 for the slice cast to succeed.
/// On big-endian targets each word is byte-swapped and the words are stored
/// in reverse order — presumably to match the pool's emission order; confirm
/// against the final literal-pool writer.
fn emitLiteral(isel: *Select, bytes: []const u8) !void {
    const words: []align(1) const u32 = @ptrCast(bytes);
    const literals = try isel.literals.addManyAsSlice(isel.pt.zcu.gpa, words.len);
    switch (isel.target.cpu.arch.endian()) {
        .little => @memcpy(literals, words),
        .big => for (words, 0..) |word, word_index| {
            literals[literals.len - 1 - word_index] = @byteSwap(word);
        },
    }
}
+
/// Records a codegen failure for the function currently being selected and
/// returns the resulting error. Marked cold so failure reporting stays off
/// the hot path.
fn fail(isel: *Select, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } {
    @branchHint(.cold);
    const zcu = isel.pt.zcu;
    return zcu.codegenFail(isel.nav_index, format, args);
}
+
/// dst = src
/// Materializes `src_imm` into `dst_reg` using the cheapest sequence found:
///  * zero: a single `orr dst, zr, zr` (canonical mov #0);
///  * a replicated run-of-ones pattern: a single `orr` with a bitmask
///    immediate;
///  * otherwise: a `movz`/`movn` base (choosing whichever 16-bit fill —
///    0x0000 or 0xffff — covers the most chunks for free) plus one `movk`
///    per remaining chunk.
/// NOTE: instructions are emitted in reverse execution order, consistent
/// with the rest of this backend (the `movk`s are emitted before the
/// `movz`/`movn` they patch).
fn movImmediate(isel: *Select, dst_reg: Register, src_imm: u64) !void {
    const sf = dst_reg.format.integer;
    if (src_imm == 0) {
        const zr: Register = switch (sf) {
            .word => .wzr,
            .doubleword => .xzr,
        };
        return isel.emit(.orr(dst_reg, zr, .{ .register = zr }));
    }

    // movz/movk/movn operate on 16-bit chunks.
    const Part = u16;
    const min_part: Part = std.math.minInt(Part);
    const max_part: Part = std.math.maxInt(Part);

    const parts: [4]Part = @bitCast(switch (sf) {
        .word => @as(u32, @intCast(src_imm)),
        .doubleword => @as(u64, @intCast(src_imm)),
    });
    const width: u7 = switch (sf) {
        .word => 32,
        .doubleword => 64,
    };
    const parts_len: u3 = @intCast(@divExact(width, @bitSizeOf(Part)));
    // Count how many chunks an all-zeros or all-ones fill would cover.
    var equal_min_count: u3 = 0;
    var equal_max_count: u3 = 0;
    for (parts[0..parts_len]) |part| {
        equal_min_count += @intFromBool(part == min_part);
        equal_max_count += @intFromBool(part == max_part);
    }

    // Pick the fill that leaves the fewest chunks to patch; at least one
    // instruction is always needed.
    const equal_fill_count, const fill_part: Part = if (equal_min_count >= equal_max_count)
        .{ equal_min_count, min_part }
    else
        .{ equal_max_count, max_part };
    var remaining_parts = @max(parts_len - equal_fill_count, 1);

    if (remaining_parts > 1) {
        // Multiple moves would be needed; try to encode the value as a
        // single ORR bitmask immediate instead: an element of `elem_width`
        // bits, replicated across the register, whose value is a (possibly
        // rotated) contiguous run of ones.
        var elem_width: u8 = 2;
        while (elem_width <= width) : (elem_width <<= 1) {
            const emask = @as(u64, std.math.maxInt(u64)) >> @intCast(64 - elem_width);
            const rmask = @divExact(@as(u64, switch (sf) {
                .word => std.math.maxInt(u32),
                .doubleword => std.math.maxInt(u64),
            }), emask);
            const elem = src_imm & emask;
            // The whole immediate must be `elem` replicated.
            if (src_imm != elem * rmask) continue;
            // Invert elements whose run of ones wraps around, so the run
            // test below only has to handle the contiguous case.
            const imask: u64 = @bitCast(@as(i64, @bitCast(elem << 63)) >> 63);
            const lsb0 = elem ^ (imask & emask);
            const lsb1 = (lsb0 - 1) | lsb0;
            if ((lsb1 +% 1) & lsb1 == 0) {
                const lo: u6 = @intCast(@ctz(lsb0));
                const hi: u6 = @intCast(@clz(lsb0) - (64 - elem_width));
                const mid: u6 = @intCast(elem_width - lo - hi);
                const smask: u6 = @truncate(imask);
                const mid_masked = mid & ~smask;
                return isel.emit(.orr(
                    dst_reg,
                    switch (sf) {
                        .word => .wzr,
                        .doubleword => .xzr,
                    },
                    // N/immr/imms per the A64 bitmask immediate encoding.
                    .{ .immediate = .{
                        .N = @enumFromInt(elem_width >> 6),
                        .immr = hi + mid_masked,
                        .imms = ((((lo + hi) & smask) | mid_masked) - 1) | -%@as(u6, @truncate(elem_width)) << 1,
                    } },
                ));
            }
        }
    }

    // Walk chunks from the most significant down, skipping fill chunks
    // (but never so many that nothing would be emitted); the final emitted
    // chunk — executed first — is the movz/movn that establishes the fill.
    var part_index = parts_len;
    while (part_index > 0) {
        part_index -= 1;
        if (part_index >= remaining_parts and parts[part_index] == fill_part) continue;
        remaining_parts -= 1;
        try isel.emit(if (remaining_parts > 0) .movk(
            dst_reg,
            parts[part_index],
            .{ .lsl = @enumFromInt(part_index) },
        ) else switch (fill_part) {
            else => unreachable,
            min_part => .movz(
                dst_reg,
                parts[part_index],
                .{ .lsl = @enumFromInt(part_index) },
            ),
            // movn moves the complement, giving an all-ones fill.
            max_part => .movn(
                dst_reg,
                ~parts[part_index],
                .{ .lsl = @enumFromInt(part_index) },
            ),
        });
    }
    assert(remaining_parts == 0);
}
+
/// elem_ptr = base +- elem_size * index
/// elem_ptr, base, and index may alias
/// Strength-reduces the multiply by `elem_size` based on its popcount:
///  * one set bit: a single shifted-register add/sub (or just a shift/move
///    when `base` is zr);
///  * two set bits (2^a + 2^b): index*size = (index + (index << (b-a))) << a,
///    using a scratch register for the inner sum;
///  * a contiguous run of ones (2^m - 2^a): index*size =
///    ((index << (m-a)) - index) << a, folded through a double negation via
///    sub instructions;
///  * anything else: materialize `elem_size` and use madd/msub.
/// NOTE: instructions are emitted in reverse execution order, so each case
/// emits the final add/sub first and the sub-expression after it.
fn elemPtr(
    isel: *Select,
    elem_ptr_ra: Register.Alias,
    base_ra: Register.Alias,
    op: codegen.aarch64.encoding.Instruction.AddSubtractOp,
    elem_size: u64,
    index_vi: Value.Index,
) !void {
    const index_mat = try index_vi.matReg(isel);
    switch (@popCount(elem_size)) {
        0 => unreachable,
        1 => try isel.emit(switch (op) {
            .add => switch (base_ra) {
                else => .add(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
                    .register = index_mat.ra.x(),
                    .shift = .{ .lsl = @intCast(@ctz(elem_size)) },
                } }),
                // base is zr: the result is just the (shifted) index.
                .zr => switch (@ctz(elem_size)) {
                    0 => .orr(elem_ptr_ra.x(), .xzr, .{ .register = index_mat.ra.x() }),
                    // lsl by `shift` expressed as ubfm.
                    else => |shift| .ubfm(elem_ptr_ra.x(), index_mat.ra.x(), .{
                        .N = .doubleword,
                        .immr = @intCast(64 - shift),
                        .imms = @intCast(63 - shift),
                    }),
                },
            },
            .sub => .sub(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
                .register = index_mat.ra.x(),
                .shift = .{ .lsl = @intCast(@ctz(elem_size)) },
            } }),
        }),
        2 => {
            const shift: u6 = @intCast(@ctz(elem_size));
            const temp_ra = temp_ra: switch (op) {
                .add => switch (base_ra) {
                    else => {
                        const temp_ra = try isel.allocIntReg();
                        errdefer isel.freeReg(temp_ra);
                        // Executed second: elem_ptr = base + (temp << shift).
                        try isel.emit(.add(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
                            .register = temp_ra.x(),
                            .shift = .{ .lsl = shift },
                        } }));
                        break :temp_ra temp_ra;
                    },
                    .zr => {
                        // base is zr: compute the sum in elem_ptr itself,
                        // then (executed second) shift it into place.
                        if (shift > 0) try isel.emit(.ubfm(elem_ptr_ra.x(), elem_ptr_ra.x(), .{
                            .N = .doubleword,
                            .immr = -%shift,
                            .imms = ~shift,
                        }));
                        break :temp_ra elem_ptr_ra;
                    },
                },
                .sub => {
                    const temp_ra = try isel.allocIntReg();
                    errdefer isel.freeReg(temp_ra);
                    try isel.emit(.sub(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
                        .register = temp_ra.x(),
                        .shift = .{ .lsl = shift },
                    } }));
                    break :temp_ra temp_ra;
                },
            };
            defer if (temp_ra != elem_ptr_ra) isel.freeReg(temp_ra);
            // Executed first: temp = index + (index << (high_bit - low_bit)).
            try isel.emit(.add(temp_ra.x(), index_mat.ra.x(), .{ .shifted_register = .{
                .register = index_mat.ra.x(),
                .shift = .{ .lsl = @intCast(63 - @clz(elem_size) - shift) },
            } }));
        },
        else => {
            const elem_size_lsb1 = (elem_size - 1) | elem_size;
            // Contiguous run of ones (elem_size == 2^m - 2^shift)?
            if ((elem_size_lsb1 +% 1) & elem_size_lsb1 == 0) {
                const shift: u6 = @intCast(@ctz(elem_size));
                const temp_ra = temp_ra: switch (op) {
                    .add => {
                        const temp_ra = try isel.allocIntReg();
                        errdefer isel.freeReg(temp_ra);
                        // temp below is the negated product scaled down, so
                        // adding becomes subtracting here (double negation).
                        try isel.emit(.sub(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
                            .register = temp_ra.x(),
                            .shift = .{ .lsl = shift },
                        } }));
                        break :temp_ra temp_ra;
                    },
                    .sub => switch (base_ra) {
                        else => {
                            const temp_ra = try isel.allocIntReg();
                            errdefer isel.freeReg(temp_ra);
                            try isel.emit(.add(elem_ptr_ra.x(), base_ra.x(), .{ .shifted_register = .{
                                .register = temp_ra.x(),
                                .shift = .{ .lsl = shift },
                            } }));
                            break :temp_ra temp_ra;
                        },
                        .zr => {
                            if (shift > 0) try isel.emit(.ubfm(elem_ptr_ra.x(), elem_ptr_ra.x(), .{
                                .N = .doubleword,
                                .immr = -%shift,
                                .imms = ~shift,
                            }));
                            break :temp_ra elem_ptr_ra;
                        },
                    },
                };
                defer if (temp_ra != elem_ptr_ra) isel.freeReg(temp_ra);
                // Executed first: temp = index - (index << (m - shift)).
                try isel.emit(.sub(temp_ra.x(), index_mat.ra.x(), .{ .shifted_register = .{
                    .register = index_mat.ra.x(),
                    .shift = .{ .lsl = @intCast(64 - @clz(elem_size) - shift) },
                } }));
            } else {
                // General case: elem_ptr = base +- index * elem_size, with
                // elem_size materialized (executed first) into elem_ptr.
                try isel.emit(switch (op) {
                    .add => .madd(elem_ptr_ra.x(), index_mat.ra.x(), elem_ptr_ra.x(), base_ra.x()),
                    .sub => .msub(elem_ptr_ra.x(), index_mat.ra.x(), elem_ptr_ra.x(), base_ra.x()),
                });
                try isel.movImmediate(elem_ptr_ra.x(), elem_size);
            }
        },
    }
    try index_mat.finish(isel);
}
+
+fn clzLimb(
+ isel: *Select,
+ res_ra: Register.Alias,
+ src_int_info: std.builtin.Type.Int,
+ src_ra: Register.Alias,
+) !void {
+ switch (src_int_info.bits) {
+ else => unreachable,
+ 1...31 => |bits| {
+ try isel.emit(.sub(res_ra.w(), res_ra.w(), .{
+ .immediate = @intCast(32 - bits),
+ }));
+ switch (src_int_info.signedness) {
+ .signed => {
+ try isel.emit(.clz(res_ra.w(), res_ra.w()));
+ try isel.emit(.ubfm(res_ra.w(), src_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.clz(res_ra.w(), src_ra.w())),
+ }
+ },
+ 32 => try isel.emit(.clz(res_ra.w(), src_ra.w())),
+ 33...63 => |bits| {
+ try isel.emit(.sub(res_ra.w(), res_ra.w(), .{
+ .immediate = @intCast(64 - bits),
+ }));
+ switch (src_int_info.signedness) {
+ .signed => {
+ try isel.emit(.clz(res_ra.x(), res_ra.x()));
+ try isel.emit(.ubfm(res_ra.x(), src_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }));
+ },
+ .unsigned => try isel.emit(.clz(res_ra.x(), src_ra.x())),
+ }
+ },
+ 64 => try isel.emit(.clz(res_ra.x(), src_ra.x())),
+ }
+}
+
+fn ctzLimb(
+ isel: *Select,
+ res_ra: Register.Alias,
+ src_int_info: std.builtin.Type.Int,
+ src_ra: Register.Alias,
+) !void {
+ switch (src_int_info.bits) {
+ else => unreachable,
+ 1...31 => |bits| {
+ try isel.emit(.clz(res_ra.w(), res_ra.w()));
+ try isel.emit(.rbit(res_ra.w(), res_ra.w()));
+ try isel.emit(.orr(res_ra.w(), src_ra.w(), .{ .immediate = .{
+ .N = .word,
+ .immr = @intCast(32 - bits),
+ .imms = @intCast(32 - bits - 1),
+ } }));
+ },
+ 32 => {
+ try isel.emit(.clz(res_ra.w(), res_ra.w()));
+ try isel.emit(.rbit(res_ra.w(), src_ra.w()));
+ },
+ 33...63 => |bits| {
+ try isel.emit(.clz(res_ra.x(), res_ra.x()));
+ try isel.emit(.rbit(res_ra.x(), res_ra.x()));
+ try isel.emit(.orr(res_ra.x(), src_ra.x(), .{ .immediate = .{
+ .N = .doubleword,
+ .immr = @intCast(64 - bits),
+ .imms = @intCast(64 - bits - 1),
+ } }));
+ },
+ 64 => {
+ try isel.emit(.clz(res_ra.x(), res_ra.x()));
+ try isel.emit(.rbit(res_ra.x(), src_ra.x()));
+ },
+ }
+}
+
+fn storeReg(
+ isel: *Select,
+ ra: Register.Alias,
+ size: u64,
+ base_ra: Register.Alias,
+ offset: i65,
+) !void {
+ switch (size) {
+ 0 => unreachable,
+ 1 => {
+ if (std.math.cast(u12, offset)) |unsigned_offset| return isel.emit(if (ra.isVector()) .str(
+ ra.b(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ) else .strb(
+ ra.w(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ));
+ if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(if (ra.isVector())
+ .stur(ra.b(), base_ra.x(), signed_offset)
+ else
+ .sturb(ra.w(), base_ra.x(), signed_offset));
+ },
+ 2 => {
+ if (std.math.cast(u13, offset)) |unsigned_offset| if (unsigned_offset % 2 == 0)
+ return isel.emit(if (ra.isVector()) .str(
+ ra.h(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ) else .strh(
+ ra.w(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ));
+ if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(if (ra.isVector())
+ .stur(ra.h(), base_ra.x(), signed_offset)
+ else
+ .sturh(ra.w(), base_ra.x(), signed_offset));
+ },
+ 3 => {
+ const hi8_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi8_ra);
+ try isel.storeReg(hi8_ra, 1, base_ra, offset + 2);
+ try isel.storeReg(ra, 2, base_ra, offset);
+ return isel.emit(.ubfm(hi8_ra.w(), ra.w(), .{
+ .N = .word,
+ .immr = 16,
+ .imms = 16 + 8 - 1,
+ }));
+ },
+ 4 => {
+ if (std.math.cast(u14, offset)) |unsigned_offset| if (unsigned_offset % 4 == 0) return isel.emit(.str(
+ if (ra.isVector()) ra.s() else ra.w(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ));
+ if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.stur(
+ if (ra.isVector()) ra.s() else ra.w(),
+ base_ra.x(),
+ signed_offset,
+ ));
+ },
+ 5 => {
+ const hi8_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi8_ra);
+ try isel.storeReg(hi8_ra, 1, base_ra, offset + 4);
+ try isel.storeReg(ra, 4, base_ra, offset);
+ return isel.emit(.ubfm(hi8_ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = 32,
+ .imms = 32 + 8 - 1,
+ }));
+ },
+ 6 => {
+ const hi16_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi16_ra);
+ try isel.storeReg(hi16_ra, 2, base_ra, offset + 4);
+ try isel.storeReg(ra, 4, base_ra, offset);
+ return isel.emit(.ubfm(hi16_ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = 32,
+ .imms = 32 + 16 - 1,
+ }));
+ },
+ 7 => {
+ const hi16_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi16_ra);
+ const hi8_ra = try isel.allocIntReg();
+ defer isel.freeReg(hi8_ra);
+ try isel.storeReg(hi8_ra, 1, base_ra, offset + 6);
+ try isel.storeReg(hi16_ra, 2, base_ra, offset + 4);
+ try isel.storeReg(ra, 4, base_ra, offset);
+ try isel.emit(.ubfm(hi8_ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = 32 + 16,
+ .imms = 32 + 16 + 8 - 1,
+ }));
+ return isel.emit(.ubfm(hi16_ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = 32,
+ .imms = 32 + 16 - 1,
+ }));
+ },
+ 8 => {
+ if (std.math.cast(u15, offset)) |unsigned_offset| if (unsigned_offset % 8 == 0) return isel.emit(.str(
+ if (ra.isVector()) ra.d() else ra.x(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ));
+ if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.stur(
+ if (ra.isVector()) ra.d() else ra.x(),
+ base_ra.x(),
+ signed_offset,
+ ));
+ },
+ 16 => {
+ if (std.math.cast(u16, offset)) |unsigned_offset| if (unsigned_offset % 16 == 0) return isel.emit(.str(
+ ra.q(),
+ .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = unsigned_offset,
+ } },
+ ));
+ if (std.math.cast(i9, offset)) |signed_offset| return isel.emit(.stur(ra.q(), base_ra.x(), signed_offset));
+ },
+ else => return isel.fail("bad store size: {d}", .{size}),
+ }
+ const ptr_ra = try isel.allocIntReg();
+ defer isel.freeReg(ptr_ra);
+ try isel.storeReg(ra, size, ptr_ra, 0);
+ if (std.math.cast(u24, offset)) |pos_offset| {
+ const lo12: u12 = @truncate(pos_offset >> 0);
+ const hi12: u12 = @intCast(pos_offset >> 12);
+ if (hi12 > 0) try isel.emit(.add(
+ ptr_ra.x(),
+ if (lo12 > 0) ptr_ra.x() else base_ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0 or hi12 == 0) try isel.emit(.add(ptr_ra.x(), base_ra.x(), .{ .immediate = lo12 }));
+ } else if (std.math.cast(u24, -offset)) |neg_offset| {
+ const lo12: u12 = @truncate(neg_offset >> 0);
+ const hi12: u12 = @intCast(neg_offset >> 12);
+ if (hi12 > 0) try isel.emit(.sub(
+ ptr_ra.x(),
+ if (lo12 > 0) ptr_ra.x() else base_ra.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ if (lo12 > 0 or hi12 == 0) try isel.emit(.sub(ptr_ra.x(), base_ra.x(), .{ .immediate = lo12 }));
+ } else {
+ try isel.emit(.add(ptr_ra.x(), base_ra.x(), .{ .register = ptr_ra.x() }));
+ try isel.movImmediate(ptr_ra.x(), @truncate(@as(u65, @bitCast(offset))));
+ }
+}
+
/// Backing word type of the packed dominator bitsets stored in `isel.dom`.
const DomInt = u8;
+
+pub const Value = struct {
+ refs: u32,
+ flags: Flags,
+ offset_from_parent: u64,
+ parent_payload: Parent.Payload,
+ location_payload: Location.Payload,
+ parts: Value.Index,
+
+ /// Must be at least 16 to compute call abi.
+ /// Must be at least 16, the largest hardware alignment.
+ pub const max_parts = 16;
+ pub const PartsLen = std.math.IntFittingRange(0, Value.max_parts);
+
+ comptime {
+ if (!std.debug.runtime_safety) assert(@sizeOf(Value) == 32);
+ }
+
+ pub const Flags = packed struct(u32) {
+ alignment: InternPool.Alignment,
+ parent_tag: Parent.Tag,
+ location_tag: Location.Tag,
+ parts_len_minus_one: std.math.IntFittingRange(0, Value.max_parts - 1),
+ unused: u18 = 0,
+ };
+
+ pub const Parent = union(enum(u3)) {
+ unallocated: void,
+ stack_slot: Indirect,
+ address: Value.Index,
+ value: Value.Index,
+ constant: Constant,
+
+ pub const Tag = @typeInfo(Parent).@"union".tag_type.?;
+ pub const Payload = @Type(.{ .@"union" = .{
+ .layout = .auto,
+ .tag_type = null,
+ .fields = @typeInfo(Parent).@"union".fields,
+ .decls = &.{},
+ } });
+ };
+
+ /// How a value is materialized. The tag is stored in `Flags.location_tag`.
+ pub const Location = union(enum(u1)) {
+ /// Too big for a single register; only the byte size is tracked.
+ large: struct {
+ size: u64,
+ },
+ /// Fits in a register. `.zr` in `hint`/`register` encodes "none".
+ small: struct {
+ size: u5,
+ signedness: std.builtin.Signedness,
+ is_vector: bool,
+ hint: Register.Alias,
+ register: Register.Alias,
+ },
+
+ pub const Tag = @typeInfo(Location).@"union".tag_type.?;
+ /// Untagged twin of `Location`; the tag lives in `Flags.location_tag`.
+ pub const Payload = @Type(.{ .@"union" = .{
+ .layout = .auto,
+ .tag_type = null,
+ .fields = @typeInfo(Location).@"union".fields,
+ .decls = &.{},
+ } });
+ };
+
+ /// A base register plus a signed byte displacement.
+ pub const Indirect = packed struct(u32) {
+ base: Register.Alias,
+ offset: i25,
+
+ /// Returns a copy of `ind` displaced by `offset`; the base register
+ /// is unchanged.
+ pub fn withOffset(ind: Indirect, offset: i25) Indirect {
+ var adjusted = ind;
+ adjusted.offset += offset;
+ return adjusted;
+ }
+ };
+
+ /// Handle to a `Value` stored in `isel.values`. The top two encodings are
+ /// sentinels: `.free` means "no value"; `.allocating` marks a slot or
+ /// register that is in the middle of being set up.
+ pub const Index = enum(u32) {
+ allocating = std.math.maxInt(u32) - 1,
+ free = std.math.maxInt(u32) - 0,
+ _,
+
+ /// Resolves the handle to its backing `Value`. The pointer is invalidated
+ /// if `isel.values` grows.
+ fn get(vi: Value.Index, isel: *Select) *Value {
+ return &isel.values.items[@intFromEnum(vi)];
+ }
+
+ fn setAlignment(vi: Value.Index, isel: *Select, new_alignment: InternPool.Alignment) void {
+ vi.get(isel).flags.alignment = new_alignment;
+ }
+
+ pub fn alignment(vi: Value.Index, isel: *Select) InternPool.Alignment {
+ return vi.get(isel).flags.alignment;
+ }
+
+ /// Attaches a previously unallocated value to `new_parent`, splitting the
+ /// tagged union into its tag (stored in `flags`) and untagged payload.
+ /// If this value is already referenced, the new parent gains a reference.
+ pub fn setParent(vi: Value.Index, isel: *Select, new_parent: Parent) void {
+ const value = vi.get(isel);
+ assert(value.flags.parent_tag == .unallocated);
+ // Tagged-union-to-enum coercion stores just the tag.
+ value.flags.parent_tag = new_parent;
+ value.parent_payload = switch (new_parent) {
+ .unallocated => unreachable,
+ inline else => |payload, tag| @unionInit(Parent.Payload, @tagName(tag), payload),
+ };
+ if (value.refs > 0) switch (new_parent) {
+ .unallocated => unreachable,
+ .stack_slot, .constant => {},
+ .address, .value => |parent_vi| _ = parent_vi.ref(isel),
+ };
+ }
+
+ /// Reconstructs the tagged `Parent` union from the split tag + payload.
+ pub fn parent(vi: Value.Index, isel: *Select) Parent {
+ const value = vi.get(isel);
+ return switch (value.flags.parent_tag) {
+ inline else => |tag| @unionInit(
+ Parent,
+ @tagName(tag),
+ @field(value.parent_payload, @tagName(tag)),
+ ),
+ };
+ }
+
+ /// Walks `.value` parent links to the topmost ancestor that is not itself
+ /// a sub-value, accumulating the byte offset of `initial_vi` within it.
+ /// Returns `.{ offset, ancestor }`.
+ pub fn valueParent(initial_vi: Value.Index, isel: *Select) struct { u64, Value.Index } {
+ var offset: u64 = 0;
+ var vi = initial_vi;
+ parent: switch (vi.parent(isel)) {
+ else => return .{ offset, vi },
+ .value => |parent_vi| {
+ offset += vi.position(isel)[0];
+ vi = parent_vi;
+ continue :parent parent_vi.parent(isel);
+ },
+ }
+ }
+
+ /// Reconstructs the tagged `Location` union from the split tag + payload.
+ pub fn location(vi: Value.Index, isel: *Select) Location {
+ const value = vi.get(isel);
+ return switch (value.flags.location_tag) {
+ inline else => |tag| @unionInit(
+ Location,
+ @tagName(tag),
+ @field(value.location_payload, @tagName(tag)),
+ ),
+ };
+ }
+
+ /// Returns this value's byte offset within its parent together with its
+ /// byte size.
+ pub fn position(vi: Value.Index, isel: *Select) struct { u64, u64 } {
+ const value = vi.get(isel);
+ const byte_size = vi.size(isel);
+ return .{ value.offset_from_parent, byte_size };
+ }
+
+ /// Returns the size in bytes, for both large and small locations.
+ pub fn size(vi: Value.Index, isel: *Select) u64 {
+ return switch (vi.location(isel)) {
+ .large => |loc| loc.size,
+ .small => |loc| loc.size,
+ };
+ }
+
+ /// Records a preferred register for a small value (`.zr` clears the hint).
+ fn setHint(vi: Value.Index, isel: *Select, new_hint: Register.Alias) void {
+ vi.get(isel).location_payload.small.hint = new_hint;
+ }
+
+ /// Returns the preferred register for this value, or null when the value
+ /// is large or no hint is set (`.zr` encodes "no hint").
+ pub fn hint(vi: Value.Index, isel: *Select) ?Register.Alias {
+ const loc = vi.location(isel);
+ if (loc != .small) return null;
+ const hint_reg = loc.small.hint;
+ return if (hint_reg == .zr) null else hint_reg;
+ }
+
+ /// Sets the signedness of a small value. Asserted to apply only to values
+ /// of at most 2 bytes, where signedness selects the sub-word load form
+ /// (ldrsb/ldrsh vs ldrb/ldrh — see `load`).
+ fn setSignedness(vi: Value.Index, isel: *Select, new_signedness: std.builtin.Signedness) void {
+ const value = vi.get(isel);
+ assert(value.location_payload.small.size <= 2);
+ value.location_payload.small.signedness = new_signedness;
+ }
+
+ /// Signedness of a small value; large (in-memory) values are treated as
+ /// unsigned.
+ pub fn signedness(vi: Value.Index, isel: *Select) std.builtin.Signedness {
+ const value = vi.get(isel);
+ if (value.flags.location_tag == .small) return value.location_payload.small.signedness;
+ return .unsigned;
+ }
+
+ /// Marks a small value as living in a vector register; must not already
+ /// be set.
+ fn setIsVector(vi: Value.Index, isel: *Select) void {
+ const is_vector = &vi.get(isel).location_payload.small.is_vector;
+ assert(!is_vector.*);
+ is_vector.* = true;
+ }
+
+ /// Whether this value lives in a vector (SIMD/FP) register; large values
+ /// never do.
+ pub fn isVector(vi: Value.Index, isel: *Select) bool {
+ const value = vi.get(isel);
+ return value.flags.location_tag == .small and value.location_payload.small.is_vector;
+ }
+
+ /// Returns the register currently holding this value, or null (`.zr`
+ /// encodes "not in a register"; large values never are).
+ pub fn register(vi: Value.Index, isel: *Select) ?Register.Alias {
+ const loc = vi.location(isel);
+ if (loc != .small) return null;
+ const reg = loc.small.register;
+ return if (reg == .zr) null else reg;
+ }
+
+ /// A value is "used" if its root ancestor has storage assigned, or if any
+ /// part of it currently occupies a register.
+ pub fn isUsed(vi: Value.Index, isel: *Select) bool {
+ return vi.valueParent(isel)[1].parent(isel) != .unallocated or vi.hasRegisterRecursive(isel);
+ }
+
+ /// Whether this value or any of its sub-parts holds a register. The
+ /// `only() == null` check stops recursion at leaves, since a leaf's part
+ /// iterator yields the leaf itself.
+ fn hasRegisterRecursive(vi: Value.Index, isel: *Select) bool {
+ if (vi.register(isel)) |_| return true;
+ var part_it = vi.parts(isel);
+ if (part_it.only() == null) while (part_it.next()) |part_vi| if (part_vi.hasRegisterRecursive(isel)) return true;
+ return false;
+ }
+
+ /// Declares that this value splits into `parts_len` parts occupying the
+ /// next `parts_len` consecutive slots of `isel.values` (to be appended by
+ /// the caller, e.g. via `addPart` — TODO confirm the append protocol).
+ fn setParts(vi: Value.Index, isel: *Select, parts_len: Value.PartsLen) void {
+ assert(parts_len > 1);
+ const value = vi.get(isel);
+ assert(value.flags.parts_len_minus_one == 0);
+ value.parts = @enumFromInt(isel.values.items.len);
+ value.flags.parts_len_minus_one = @intCast(parts_len - 1);
+ }
+
+ /// Creates one sub-part at `part_offset`/`part_size`, inheriting this
+ /// value's alignment, and links it to this value.
+ fn addPart(vi: Value.Index, isel: *Select, part_offset: u64, part_size: u64) Value.Index {
+ const part_vi = isel.initValueAdvanced(vi.alignment(isel), part_offset, part_size);
+ tracking_log.debug("${d} <- ${d}[{d}]", .{
+ @intFromEnum(part_vi),
+ @intFromEnum(vi),
+ part_offset,
+ });
+ part_vi.setParent(isel, .{ .value = vi });
+ return part_vi;
+ }
+
+ /// Iterates this value's parts; an unsplit value yields just itself.
+ pub fn parts(vi: Value.Index, isel: *Select) Value.PartIterator {
+ const value = vi.get(isel);
+ return switch (value.flags.parts_len_minus_one) {
+ 0 => .initOne(vi),
+ else => |parts_len_minus_one| .{
+ .vi = value.parts,
+ .remaining = @as(Value.PartsLen, parts_len_minus_one) + 1,
+ },
+ };
+ }
+
+ /// Iterates the minimal run of parts covering
+ /// `[part_offset, part_offset + part_size)`. Relies on parts living at
+ /// consecutive indices, ordered by offset.
+ fn containingParts(vi: Value.Index, isel: *Select, part_offset: u64, part_size: u64) Value.PartIterator {
+ const start_vi = vi.partAtOffset(isel, part_offset);
+ const start_offset, const start_size = start_vi.position(isel);
+ if (part_offset >= start_offset and part_size <= start_size) return .initOne(start_vi);
+ const end_vi = vi.partAtOffset(isel, part_size - 1 + part_offset);
+ return .{
+ .vi = start_vi,
+ .remaining = @intCast(@intFromEnum(end_vi) - @intFromEnum(start_vi) + 1),
+ };
+ }
+ // Not called yet; keep it semantically analyzed.
+ comptime {
+ _ = containingParts;
+ }
+
+ /// Binary-searches the contiguous, offset-sorted part run for the part
+ /// containing `offset`; an unsplit value is its own result.
+ fn partAtOffset(vi: Value.Index, isel: *Select, offset: u64) Value.Index {
+ const SearchPartIndex = std.math.IntFittingRange(0, Value.max_parts * 2 - 1);
+ const value = vi.get(isel);
+ var last: SearchPartIndex = value.flags.parts_len_minus_one;
+ if (last == 0) return vi;
+ var first: SearchPartIndex = 0;
+ last += 1;
+ while (true) {
+ const mid = (first + last) / 2;
+ const mid_vi: Value.Index = @enumFromInt(@intFromEnum(value.parts) + mid);
+ if (mid == first) return mid_vi;
+ if (offset < mid_vi.get(isel).offset_from_parent) last = mid else first = mid;
+ }
+ }
+
+ /// Returns an iterator over the parts of `vi` overlapping the field at
+ /// `[field_offset, field_offset + field_size)` within type `ty`.
+ /// (The actual walking/splitting happens in `FieldPartIterator`.)
+ fn field(
+ vi: Value.Index,
+ ty: ZigType,
+ field_offset: u64,
+ field_size: u64,
+ ) Value.FieldPartIterator {
+ assert(field_size > 0);
+ return .{
+ .vi = vi,
+ .ty = ty,
+ .field_offset = field_offset,
+ .field_size = field_size,
+ .next_offset = 0,
+ };
+ }
+
+ /// Increments the reference count. A 0 -> 1 transition also references the
+ /// parent chain through `.address`/`.value` links. Returns `initial_vi`.
+ fn ref(initial_vi: Value.Index, isel: *Select) Value.Index {
+ var vi = initial_vi;
+ while (true) {
+ const refs = &vi.get(isel).refs;
+ refs.* += 1;
+ // Only the first reference propagates upward.
+ if (refs.* > 1) return initial_vi;
+ switch (vi.parent(isel)) {
+ .unallocated, .stack_slot, .constant => {},
+ .address, .value => |parent_vi| {
+ vi = parent_vi;
+ continue;
+ },
+ }
+ return initial_vi;
+ }
+ }
+
+ /// Decrements the reference count; dropping the last reference also
+ /// dereferences the parent chain.
+ pub fn deref(initial_vi: Value.Index, isel: *Select) void {
+ var vi = initial_vi;
+ while (true) {
+ const refs = &vi.get(isel).refs;
+ refs.* -= 1;
+ if (refs.* > 0) return;
+ switch (vi.parent(isel)) {
+ .unallocated, .constant => {},
+ .stack_slot => {
+ // reuse stack slot
+ },
+ .address, .value => |parent_vi| {
+ vi = parent_vi;
+ continue;
+ },
+ }
+ return;
+ }
+ }
+
+ /// Copies the value of the AIR ref `src_ref` into `dst_vi`.
+ fn move(dst_vi: Value.Index, isel: *Select, src_ref: Air.Inst.Ref) !void {
+ try dst_vi.copy(
+ isel,
+ isel.air.typeOf(src_ref, &isel.pt.zcu.intern_pool),
+ try isel.use(src_ref),
+ );
+ }
+
+ /// Copies `src_vi` into `dst_vi`, treating both as whole values of type `ty`.
+ fn copy(dst_vi: Value.Index, isel: *Select, ty: ZigType, src_vi: Value.Index) !void {
+ try dst_vi.copyAdvanced(isel, src_vi, .{
+ .ty = ty,
+ .dst_vi = dst_vi,
+ .dst_offset = 0,
+ .src_vi = src_vi,
+ .src_offset = 0,
+ });
+ }
+
+ /// Recursive worker for `copy`: walks the part trees of destination and
+ /// source in tandem, using `field` on the root values to locate the range
+ /// matching each part on the other side. When both sides are single parts,
+ /// the source is forwarded into the destination's def register (skipped
+ /// when `defReg` reports the destination unused).
+ fn copyAdvanced(dst_vi: Value.Index, isel: *Select, src_vi: Value.Index, root: struct {
+ ty: ZigType,
+ dst_vi: Value.Index,
+ dst_offset: u64,
+ src_vi: Value.Index,
+ src_offset: u64,
+ }) !void {
+ if (dst_vi == src_vi) return;
+ var dst_part_it = dst_vi.parts(isel);
+ if (dst_part_it.only()) |dst_part_vi| {
+ var src_part_it = src_vi.parts(isel);
+ if (src_part_it.only()) |src_part_vi| {
+ try src_part_vi.liveOut(isel, try dst_part_vi.defReg(isel) orelse return);
+ } else while (src_part_it.next()) |src_part_vi| {
+ const src_part_offset, const src_part_size = src_part_vi.position(isel);
+ var dst_field_it = root.dst_vi.field(root.ty, root.dst_offset + src_part_offset, src_part_size);
+ const dst_field_vi = try dst_field_it.only(isel);
+ try dst_field_vi.?.copyAdvanced(isel, src_part_vi, .{
+ .ty = root.ty,
+ .dst_vi = root.dst_vi,
+ .dst_offset = root.dst_offset + src_part_offset,
+ .src_vi = root.src_vi,
+ .src_offset = root.src_offset + src_part_offset,
+ });
+ }
+ } else while (dst_part_it.next()) |dst_part_vi| {
+ const dst_part_offset, const dst_part_size = dst_part_vi.position(isel);
+ var src_field_it = root.src_vi.field(root.ty, root.src_offset + dst_part_offset, dst_part_size);
+ const src_part_vi = try src_field_it.only(isel);
+ try dst_part_vi.copyAdvanced(isel, src_part_vi.?, .{
+ .ty = root.ty,
+ .dst_vi = root.dst_vi,
+ .dst_offset = root.dst_offset + dst_part_offset,
+ .src_vi = root.src_vi,
+ .src_offset = root.src_offset + dst_part_offset,
+ });
+ }
+ }
+
+ /// Emits a (possibly multi-word) integer add/sub of `lhs_vi` and `rhs_vi`
+ /// into `res_vi`, for ABI integers up to 128 bits.
+ /// * `opts.wrap` truncates/extends the topmost part to the exact bit width.
+ /// * `opts.overflow_ra` (when not `.zr`) receives a 0/1 overflow flag.
+ /// NOTE(review): instructions appear to be emitted in reverse program order
+ /// (the loop walks parts most-significant first, yet carry flows from the
+ /// `part_offset == 0` adds/subs into the adc/adcs of higher parts) —
+ /// confirm against `Select.emit`.
+ fn addOrSubtract(
+ res_vi: Value.Index,
+ isel: *Select,
+ ty: ZigType,
+ lhs_vi: Value.Index,
+ op: codegen.aarch64.encoding.Instruction.AddSubtractOp,
+ rhs_vi: Value.Index,
+ opts: struct {
+ wrap: bool,
+ overflow_ra: Register.Alias = .zr,
+ },
+ ) !void {
+ assert(opts.wrap or opts.overflow_ra == .zr);
+ const zcu = isel.pt.zcu;
+ if (!ty.isAbiInt(zcu)) return isel.fail("bad {s} {f}", .{ @tagName(op), isel.fmtType(ty) });
+ const int_info = ty.intInfo(zcu);
+ if (int_info.bits > 128) return isel.fail("too big {s} {f}", .{ @tagName(op), isel.fmtType(ty) });
+ var part_offset = res_vi.size(isel);
+ var need_wrap = opts.wrap;
+ var need_carry = opts.overflow_ra != .zr;
+ // Walk 8-byte parts from the most significant down to offset 0; only the
+ // topmost part needs wrapping.
+ while (part_offset > 0) : (need_wrap = false) {
+ const part_size = @min(part_offset, 8);
+ part_offset -= part_size;
+ var wrapped_res_part_it = res_vi.field(ty, part_offset, part_size);
+ const wrapped_res_part_vi = try wrapped_res_part_it.only(isel);
+ const wrapped_res_part_ra = try wrapped_res_part_vi.?.defReg(isel) orelse if (need_carry) .zr else continue;
+ const unwrapped_res_part_ra = unwrapped_res_part_ra: {
+ if (!need_wrap) break :unwrapped_res_part_ra wrapped_res_part_ra;
+ // Register-sized ints need no truncation; overflow comes straight
+ // from the flags.
+ if (int_info.bits % 32 == 0) {
+ if (opts.overflow_ra != .zr) try isel.emit(.csinc(opts.overflow_ra.w(), .wzr, .wzr, .invert(switch (int_info.signedness) {
+ .signed => .vs,
+ .unsigned => switch (op) {
+ .add => .cs,
+ .sub => .cc,
+ },
+ })));
+ break :unwrapped_res_part_ra wrapped_res_part_ra;
+ }
+ // Odd bit width: overflow is detected by comparing the wrapped
+ // result against the unwrapped one.
+ const wrapped_part_ra, const unwrapped_part_ra = if (opts.overflow_ra != .zr) part_ra: {
+ switch (op) {
+ .add => {},
+ .sub => switch (int_info.signedness) {
+ .signed => {},
+ .unsigned => {
+ try isel.emit(.csinc(opts.overflow_ra.w(), .wzr, .wzr, .invert(.cc)));
+ break :part_ra .{ wrapped_res_part_ra, wrapped_res_part_ra };
+ },
+ },
+ }
+ try isel.emit(.csinc(opts.overflow_ra.w(), .wzr, .wzr, .invert(.ne)));
+ const wrapped_part_ra = switch (wrapped_res_part_ra) {
+ else => |res_part_ra| res_part_ra,
+ .zr => try isel.allocIntReg(),
+ };
+ errdefer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
+ const unwrapped_part_ra = unwrapped_part_ra: {
+ const wrapped_res_part_lock: RegLock = switch (wrapped_res_part_ra) {
+ else => |res_part_ra| isel.lockReg(res_part_ra),
+ .zr => .empty,
+ };
+ defer wrapped_res_part_lock.unlock(isel);
+ break :unwrapped_part_ra try isel.allocIntReg();
+ };
+ errdefer isel.freeReg(unwrapped_part_ra);
+ switch (part_size) {
+ else => unreachable,
+ 1...4 => try isel.emit(.subs(.wzr, wrapped_part_ra.w(), .{ .register = unwrapped_part_ra.w() })),
+ 5...8 => try isel.emit(.subs(.xzr, wrapped_part_ra.x(), .{ .register = unwrapped_part_ra.x() })),
+ }
+ break :part_ra .{ wrapped_part_ra, unwrapped_part_ra };
+ } else .{ wrapped_res_part_ra, wrapped_res_part_ra };
+ defer if (wrapped_part_ra != wrapped_res_part_ra) isel.freeReg(wrapped_part_ra);
+ errdefer if (unwrapped_part_ra != wrapped_res_part_ra) isel.freeReg(unwrapped_part_ra);
+ // Truncate to the exact bit width via sign/zero-extending bitfield moves.
+ if (wrapped_part_ra != .zr) try isel.emit(switch (part_size) {
+ else => unreachable,
+ 1...4 => switch (int_info.signedness) {
+ .signed => .sbfm(wrapped_part_ra.w(), unwrapped_part_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @truncate(int_info.bits - 1),
+ }),
+ .unsigned => .ubfm(wrapped_part_ra.w(), unwrapped_part_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @truncate(int_info.bits - 1),
+ }),
+ },
+ 5...8 => switch (int_info.signedness) {
+ .signed => .sbfm(wrapped_part_ra.x(), unwrapped_part_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @truncate(int_info.bits - 1),
+ }),
+ .unsigned => .ubfm(wrapped_part_ra.x(), unwrapped_part_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @truncate(int_info.bits - 1),
+ }),
+ },
+ });
+ break :unwrapped_res_part_ra unwrapped_part_ra;
+ };
+ defer if (unwrapped_res_part_ra != wrapped_res_part_ra) isel.freeReg(unwrapped_res_part_ra);
+ var lhs_part_it = lhs_vi.field(ty, part_offset, part_size);
+ const lhs_part_vi = try lhs_part_it.only(isel);
+ const lhs_part_mat = try lhs_part_vi.?.matReg(isel);
+ var rhs_part_it = rhs_vi.field(ty, part_offset, part_size);
+ const rhs_part_vi = try rhs_part_it.only(isel);
+ const rhs_part_mat = try rhs_part_vi.?.matReg(isel);
+ // Lowest part uses add/adds (sub/subs); higher parts consume the
+ // carry via adc/adcs (sbc/sbcs). The S-form is selected whenever a
+ // later part or the overflow check needs the flags.
+ try isel.emit(switch (part_size) {
+ else => unreachable,
+ 1...4 => switch (op) {
+ .add => switch (part_offset) {
+ 0 => switch (need_carry) {
+ false => .add(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ true => .adds(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ },
+ else => switch (need_carry) {
+ false => .adc(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+ true => .adcs(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+ },
+ },
+ .sub => switch (part_offset) {
+ 0 => switch (need_carry) {
+ false => .sub(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ true => .subs(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), .{ .register = rhs_part_mat.ra.w() }),
+ },
+ else => switch (need_carry) {
+ false => .sbc(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+ true => .sbcs(unwrapped_res_part_ra.w(), lhs_part_mat.ra.w(), rhs_part_mat.ra.w()),
+ },
+ },
+ },
+ 5...8 => switch (op) {
+ .add => switch (part_offset) {
+ 0 => switch (need_carry) {
+ false => .add(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ true => .adds(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ else => switch (need_carry) {
+ false => .adc(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+ true => .adcs(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+ },
+ },
+ .sub => switch (part_offset) {
+ 0 => switch (need_carry) {
+ false => .sub(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ true => .subs(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), .{ .register = rhs_part_mat.ra.x() }),
+ },
+ else => switch (need_carry) {
+ false => .sbc(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+ true => .sbcs(unwrapped_res_part_ra.x(), lhs_part_mat.ra.x(), rhs_part_mat.ra.x()),
+ },
+ },
+ },
+ });
+ try rhs_part_mat.finish(isel);
+ try lhs_part_mat.finish(isel);
+ // Every part below the most significant one must produce carry.
+ need_carry = true;
+ }
+ }
+
+ /// Options shared by `load` and `store`.
+ const MemoryAccessOptions = struct {
+ /// Root value for `field` lookups; `.free` means "use the value itself".
+ root_vi: Value.Index = .free,
+ /// Byte offset from the base register.
+ offset: u64 = 0,
+ /// When true, a load is emitted even if its result is unused.
+ @"volatile": bool = false,
+ /// Whether oversized parts may be split into sub-parts.
+ split: bool = true,
+ /// When set, the loaded value is sign/zero-extended to register width.
+ wrap: ?std.builtin.Type.Int = null,
+ expected_live_registers: *const LiveRegisters = &.initFill(.free),
+ };
+
+ /// Emits loads of this value (recursing through its parts) from memory at
+ /// `base_ra` + `opts.offset`. Returns whether any register was defined.
+ /// Odd sizes (3/5/7 bytes) are assembled from narrower loads combined with
+ /// shifted ORRs. NOTE(review): the combining ORRs are emitted before the
+ /// loads that feed them, consistent with reverse-program-order emission —
+ /// confirm against `Select.emit`.
+ fn load(
+ vi: Value.Index,
+ isel: *Select,
+ root_ty: ZigType,
+ base_ra: Register.Alias,
+ opts: MemoryAccessOptions,
+ ) !bool {
+ const root_vi = switch (opts.root_vi) {
+ _ => |root_vi| root_vi,
+ .allocating => unreachable,
+ .free => vi,
+ };
+ var part_it = vi.parts(isel);
+ if (part_it.only()) |part_vi| only: {
+ const part_size = part_vi.size(isel);
+ const part_is_vector = part_vi.isVector(isel);
+ // Too big for one register: split (via a field lookup that forces
+ // subdivision) and fall through to the multi-part path below.
+ if (part_size > @as(@TypeOf(part_size), if (part_is_vector) 16 else 8)) {
+ if (!opts.split) return false;
+ var subpart_it = root_vi.field(root_ty, opts.offset, part_size - 1);
+ _ = try subpart_it.next(isel);
+ part_it = vi.parts(isel);
+ assert(part_it.only() == null);
+ break :only;
+ }
+ // Volatile accesses are still performed with the zero register as
+ // destination when the result is unused.
+ const part_ra = if (try part_vi.defReg(isel)) |part_ra|
+ part_ra
+ else if (opts.@"volatile")
+ .zr
+ else
+ return false;
+ // Reserve the destination so scratch allocations below cannot claim it.
+ if (part_ra != .zr) {
+ const live_vi = isel.live_registers.getPtr(part_ra);
+ assert(live_vi.* == .free);
+ live_vi.* = .allocating;
+ }
+ if (opts.wrap) |int_info| switch (int_info.bits) {
+ else => unreachable,
+ 1...7, 9...15, 17...31 => |bits| try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(part_ra.w(), part_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(part_ra.w(), part_ra.w(), .{
+ .N = .word,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 8, 16, 32 => {},
+ 33...63 => |bits| try isel.emit(switch (int_info.signedness) {
+ .signed => .sbfm(part_ra.x(), part_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ .unsigned => .ubfm(part_ra.x(), part_ra.x(), .{
+ .N = .doubleword,
+ .immr = 0,
+ .imms = @intCast(bits - 1),
+ }),
+ }),
+ 64 => {},
+ };
+ try isel.emit(emit: switch (part_size) {
+ else => return isel.fail("bad load size of {d}", .{part_size}),
+ 1 => if (part_is_vector) .ldr(part_ra.b(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }) else switch (part_vi.signedness(isel)) {
+ .signed => .ldrsb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ .unsigned => .ldrb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ },
+ 2 => if (part_is_vector) .ldr(part_ra.h(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }) else switch (part_vi.signedness(isel)) {
+ .signed => .ldrsh(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ .unsigned => .ldrh(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ },
+ 3 => {
+ const lo16_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo16_ra);
+ try isel.emit(.orr(part_ra.w(), lo16_ra.w(), .{ .shifted_register = .{
+ .register = part_ra.w(),
+ .shift = .{ .lsl = 16 },
+ } }));
+ try isel.emit(switch (part_vi.signedness(isel)) {
+ .signed => .ldrsb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 2),
+ } }),
+ .unsigned => .ldrb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 2),
+ } }),
+ });
+ break :emit .ldrh(lo16_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } });
+ },
+ 4 => .ldr(if (part_is_vector) part_ra.s() else part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ 5 => {
+ const lo32_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo32_ra);
+ try isel.emit(.orr(part_ra.x(), lo32_ra.x(), .{ .shifted_register = .{
+ .register = part_ra.x(),
+ .shift = .{ .lsl = 32 },
+ } }));
+ try isel.emit(switch (part_vi.signedness(isel)) {
+ .signed => .ldrsb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 4),
+ } }),
+ .unsigned => .ldrb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 4),
+ } }),
+ });
+ break :emit .ldr(lo32_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } });
+ },
+ 7 => {
+ const lo32_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo32_ra);
+ const lo48_ra = try isel.allocIntReg();
+ defer isel.freeReg(lo48_ra);
+ try isel.emit(.orr(part_ra.x(), lo48_ra.x(), .{ .shifted_register = .{
+ .register = part_ra.x(),
+ .shift = .{ .lsl = 32 + 16 },
+ } }));
+ try isel.emit(switch (part_vi.signedness(isel)) {
+ .signed => .ldrsb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 4 + 2),
+ } }),
+ .unsigned => .ldrb(part_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 4 + 2),
+ } }),
+ });
+ try isel.emit(.orr(lo48_ra.x(), lo32_ra.x(), .{ .shifted_register = .{
+ .register = lo48_ra.x(),
+ .shift = .{ .lsl = 32 },
+ } }));
+ try isel.emit(.ldrh(lo48_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset + 4),
+ } }));
+ break :emit .ldr(lo32_ra.w(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } });
+ },
+ 8 => .ldr(if (part_is_vector) part_ra.d() else part_ra.x(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ 16 => .ldr(part_ra.q(), .{ .unsigned_offset = .{
+ .base = base_ra.x(),
+ .offset = @intCast(opts.offset),
+ } }),
+ });
+ // Release the reservation unless the caller expects it to stay live.
+ if (part_ra != .zr) {
+ const live_vi = isel.live_registers.getPtr(part_ra);
+ assert(live_vi.* == .allocating);
+ switch (opts.expected_live_registers.get(part_ra)) {
+ _ => {},
+ .allocating => unreachable,
+ .free => live_vi.* = .free,
+ }
+ }
+ return true;
+ }
+ var used = false;
+ while (part_it.next()) |part_vi| used |= try part_vi.load(isel, root_ty, base_ra, .{
+ .root_vi = root_vi,
+ .offset = opts.offset + part_vi.get(isel).offset_from_parent,
+ .@"volatile" = opts.@"volatile",
+ .split = opts.split,
+ // Only the final (most significant) part is wrapped.
+ .wrap = switch (part_it.remaining) {
+ else => null,
+ 0 => if (opts.wrap) |wrap| .{
+ .signedness = wrap.signedness,
+ .bits = @intCast(wrap.bits - 8 * part_vi.position(isel)[0]),
+ } else null,
+ },
+ .expected_live_registers = opts.expected_live_registers,
+ });
+ return used;
+ }
+
+ /// Emits stores of this value (recursing through its parts) to memory at
+ /// `base_ra` + `opts.offset`. Oversized unsplit parts are split first when
+ /// `opts.split` allows, otherwise the store is skipped.
+ fn store(
+ vi: Value.Index,
+ isel: *Select,
+ root_ty: ZigType,
+ base_ra: Register.Alias,
+ opts: MemoryAccessOptions,
+ ) !void {
+ const root_vi = switch (opts.root_vi) {
+ _ => |root_vi| root_vi,
+ .allocating => unreachable,
+ .free => vi,
+ };
+ var part_it = vi.parts(isel);
+ if (part_it.only()) |part_vi| only: {
+ const part_size = part_vi.size(isel);
+ const part_is_vector = part_vi.isVector(isel);
+ // Too big for one register: force subdivision and use the
+ // multi-part path below.
+ if (part_size > @as(@TypeOf(part_size), if (part_is_vector) 16 else 8)) {
+ if (!opts.split) return;
+ var subpart_it = root_vi.field(root_ty, opts.offset, part_size - 1);
+ _ = try subpart_it.next(isel);
+ part_it = vi.parts(isel);
+ assert(part_it.only() == null);
+ break :only;
+ }
+ // Materialize the part into a register, then store it.
+ const part_mat = try part_vi.matReg(isel);
+ try isel.storeReg(part_mat.ra, part_size, base_ra, opts.offset);
+ return part_mat.finish(isel);
+ }
+ while (part_it.next()) |part_vi| try part_vi.store(isel, root_ty, base_ra, .{
+ .root_vi = root_vi,
+ .offset = opts.offset + part_vi.get(isel).offset_from_parent,
+ .@"volatile" = opts.@"volatile",
+ .split = opts.split,
+ // Only the final (most significant) part is wrapped.
+ .wrap = switch (part_it.remaining) {
+ else => null,
+ 0 => if (opts.wrap) |wrap| .{
+ .signedness = wrap.signedness,
+ .bits = @intCast(wrap.bits - 8 * part_vi.position(isel)[0]),
+ } else null,
+ },
+ .expected_live_registers = opts.expected_live_registers,
+ });
+ }
+
+ /// Ensures this value can be materialized later. The active implementation
+ /// just guarantees the root ancestor has a stack slot; the `if (false)` arm
+ /// is a disabled register-preferring variant apparently kept for future use
+ /// (review: confirm it is intentionally dead).
+ fn mat(vi: Value.Index, isel: *Select) !void {
+ if (false) {
+ var part_it: Value.PartIterator = if (vi.size(isel) > 8) vi.parts(isel) else .initOne(vi);
+ if (part_it.only()) |part_vi| only: {
+ const mat_ra = mat_ra: {
+ // Prefer the register already holding the part, detaching it.
+ if (part_vi.register(isel)) |mat_ra| {
+ part_vi.get(isel).location_payload.small.register = .zr;
+ const live_vi = isel.live_registers.getPtr(mat_ra);
+ assert(live_vi.* == part_vi);
+ live_vi.* = .allocating;
+ break :mat_ra mat_ra;
+ }
+ // Otherwise the hinted register, if currently free.
+ if (part_vi.hint(isel)) |hint_ra| {
+ const live_vi = isel.live_registers.getPtr(hint_ra);
+ if (live_vi.* == .free) {
+ live_vi.* = .allocating;
+ isel.saved_registers.insert(hint_ra);
+ break :mat_ra hint_ra;
+ }
+ }
+ const part_size = part_vi.size(isel);
+ const part_is_vector = part_vi.isVector(isel);
+ if (part_size <= @as(@TypeOf(part_size), if (part_is_vector) 16 else 8))
+ switch (if (part_is_vector) isel.tryAllocVecReg() else isel.tryAllocIntReg()) {
+ .allocated => |ra| break :mat_ra ra,
+ .fill_candidate, .out_of_registers => {},
+ };
+ // No register available: fall back to a stack slot home.
+ _, const parent_vi = vi.valueParent(isel);
+ switch (parent_vi.parent(isel)) {
+ .unallocated => parent_vi.setParent(isel, .{ .stack_slot = parent_vi.allocStackSlot(isel) }),
+ else => {},
+ }
+ break :only;
+ };
+ assert(isel.live_registers.get(mat_ra) == .allocating);
+ try Value.Materialize.finish(.{ .vi = part_vi, .ra = mat_ra }, isel);
+ } else while (part_it.next()) |part_vi| try part_vi.mat(isel);
+ } else {
+ _, const parent_vi = vi.valueParent(isel);
+ switch (parent_vi.parent(isel)) {
+ .unallocated => parent_vi.setParent(isel, .{ .stack_slot = parent_vi.allocStackSlot(isel) }),
+ else => {},
+ }
+ }
+ }
+
+ /// Claims a register for this value: reuses its current register (detaching
+ /// it from the value), else takes a free hinted register, else allocates a
+ /// fresh int/vector register. The register is left in the `.allocating`
+ /// state until `Materialize.finish` runs.
+ fn matReg(vi: Value.Index, isel: *Select) !Value.Materialize {
+ const mat_ra = mat_ra: {
+ if (vi.register(isel)) |mat_ra| {
+ vi.get(isel).location_payload.small.register = .zr;
+ const live_vi = isel.live_registers.getPtr(mat_ra);
+ assert(live_vi.* == vi);
+ live_vi.* = .allocating;
+ break :mat_ra mat_ra;
+ }
+ if (vi.hint(isel)) |hint_ra| {
+ const live_vi = isel.live_registers.getPtr(hint_ra);
+ if (live_vi.* == .free) {
+ live_vi.* = .allocating;
+ // Hinted registers may be callee-saved; record the use.
+ isel.saved_registers.insert(hint_ra);
+ break :mat_ra hint_ra;
+ }
+ }
+ break :mat_ra if (vi.isVector(isel)) try isel.allocVecReg() else try isel.allocIntReg();
+ };
+ assert(isel.live_registers.get(mat_ra) == .allocating);
+ return .{ .vi = vi, .ra = mat_ra };
+ }
+
+ /// Defines a value whose home is its root ancestor's stack slot: allocates
+ /// the slot when needed and emits the loads for the parts that are actually
+ /// used (no split, so oversized parts are skipped). Returns null when the
+ /// value is entirely unused.
+ fn defAddr(
+ def_vi: Value.Index,
+ isel: *Select,
+ def_ty: ZigType,
+ wrap: ?std.builtin.Type.Int,
+ expected_live_registers: *const LiveRegisters,
+ ) !?void {
+ if (!def_vi.isUsed(isel)) return null;
+ const offset_from_parent: i65, const parent_vi = def_vi.valueParent(isel);
+ const stack_slot, const allocated = switch (parent_vi.parent(isel)) {
+ .unallocated => .{ parent_vi.allocStackSlot(isel), true },
+ .stack_slot => |stack_slot| .{ stack_slot, false },
+ else => unreachable,
+ };
+ _ = try def_vi.load(isel, def_ty, stack_slot.base, .{
+ .offset = @intCast(stack_slot.offset + offset_from_parent),
+ .split = false,
+ .wrap = wrap,
+ .expected_live_registers = expected_live_registers,
+ });
+ // Only attach the freshly allocated slot after the loads are emitted.
+ if (allocated) parent_vi.setParent(isel, .{ .stack_slot = stack_slot });
+ }
+
+ /// Emits the definition point of `def_vi`, returning the register the value
+ /// should be produced into, or null when no register is needed. Walks up the
+ /// parent chain: a register attached to a multi-part ancestor is handed to
+ /// the ancestor's first part and the remaining parts are merged into it with
+ /// BFM/FMOV; a stack-slot home additionally gets the defining store emitted.
+ fn defReg(def_vi: Value.Index, isel: *Select) !?Register.Alias {
+ var vi = def_vi;
+ var offset: i65 = 0;
+ var def_ra: ?Register.Alias = null;
+ while (true) {
+ if (vi.register(isel)) |ra| {
+ vi.get(isel).location_payload.small.register = .zr;
+ const live_vi = isel.live_registers.getPtr(ra);
+ assert(live_vi.* == vi);
+ // The register belongs to a multi-part ancestor: reassign it to
+ // the first part (offset 0) and merge the other parts into it,
+ // then restart the walk from `def_vi`.
+ if (def_ra == null and vi != def_vi) {
+ var part_it = vi.parts(isel);
+ assert(part_it.only() == null);
+
+ const first_part_vi = part_it.next().?;
+ const first_part_value = first_part_vi.get(isel);
+ assert(first_part_value.offset_from_parent == 0);
+ first_part_value.location_payload.small.register = ra;
+ live_vi.* = first_part_vi;
+
+ const vi_size = vi.size(isel);
+ while (part_it.next()) |part_vi| {
+ const part_offset, const part_size = part_vi.position(isel);
+ const part_mat = try part_vi.matReg(isel);
+ try isel.emit(if (part_vi.isVector(isel)) emit: {
+ // Vector parts must cover the whole value.
+ assert(part_offset == 0 and part_size == vi_size);
+ break :emit size: switch (vi_size) {
+ else => unreachable,
+ 2 => if (isel.target.cpu.has(.aarch64, .fullfp16))
+ .fmov(ra.h(), .{ .register = part_mat.ra.h() })
+ else
+ continue :size 4,
+ 4 => .fmov(ra.s(), .{ .register = part_mat.ra.s() }),
+ 8 => .fmov(ra.d(), .{ .register = part_mat.ra.d() }),
+ 16 => .orr(ra.@"16b"(), part_mat.ra.@"16b"(), .{ .register = part_mat.ra.@"16b"() }),
+ };
+ } else switch (vi_size) {
+ else => unreachable,
+ // Bitfield-insert the part at its byte offset.
+ 1...4 => .bfm(ra.w(), part_mat.ra.w(), .{
+ .N = .word,
+ .immr = @as(u5, @truncate(32 - 8 * part_offset)),
+ .imms = @intCast(8 * part_size - 1),
+ }),
+ 5...8 => .bfm(ra.x(), part_mat.ra.x(), .{
+ .N = .doubleword,
+ .immr = @as(u6, @truncate(64 - 8 * part_offset)),
+ .imms = @intCast(8 * part_size - 1),
+ }),
+ });
+ try part_mat.finish(isel);
+ }
+ vi = def_vi;
+ offset = 0;
+ continue;
+ }
+ live_vi.* = .free;
+ def_ra = ra;
+ }
+ offset += vi.get(isel).offset_from_parent;
+ switch (vi.parent(isel)) {
+ else => unreachable,
+ .unallocated => return def_ra,
+ .stack_slot => |stack_slot| {
+ offset += stack_slot.offset;
+ // Home is a stack slot: emit the defining store, using a
+ // scratch register when no def register was found.
+ const def_is_vector = def_vi.isVector(isel);
+ const ra = def_ra orelse if (def_is_vector) try isel.allocVecReg() else try isel.allocIntReg();
+ defer if (def_ra == null) isel.freeReg(ra);
+ try isel.storeReg(ra, def_vi.size(isel), stack_slot.base, offset);
+ return ra;
+ },
+ .value => |parent_vi| vi = parent_vi,
+ }
+ }
+ }
+
+ /// Binds this value to the incoming register `src_ra` (e.g. a parameter or
+ /// call result). If the value already expects a different register, the
+ /// move between register files is emitted, with the operand width chosen
+ /// from the value's size. `src_ra` is left in the `.allocating` state.
+ pub fn liveIn(
+ vi: Value.Index,
+ isel: *Select,
+ src_ra: Register.Alias,
+ expected_live_registers: *const LiveRegisters,
+ ) !void {
+ const src_live_vi = isel.live_registers.getPtr(src_ra);
+ if (vi.register(isel)) |dst_ra| {
+ const dst_live_vi = isel.live_registers.getPtr(dst_ra);
+ assert(dst_live_vi.* == vi);
+ if (dst_ra == src_ra) {
+ src_live_vi.* = .allocating;
+ return;
+ }
+ dst_live_vi.* = .allocating;
+ // Evict whatever currently occupies the source register.
+ if (try isel.fill(src_ra)) {
+ assert(src_live_vi.* == .free);
+ src_live_vi.* = .allocating;
+ }
+ assert(src_live_vi.* == .allocating);
+ // Move src -> dst; the int/int case uses ORR with the zero register.
+ try isel.emit(switch (dst_ra.isVector()) {
+ false => switch (src_ra.isVector()) {
+ false => switch (vi.size(isel)) {
+ else => unreachable,
+ 1...4 => .orr(dst_ra.w(), .wzr, .{ .register = src_ra.w() }),
+ 5...8 => .orr(dst_ra.x(), .xzr, .{ .register = src_ra.x() }),
+ },
+ true => switch (vi.size(isel)) {
+ else => unreachable,
+ 2 => .fmov(dst_ra.w(), .{ .register = src_ra.h() }),
+ 4 => .fmov(dst_ra.w(), .{ .register = src_ra.s() }),
+ 8 => .fmov(dst_ra.x(), .{ .register = src_ra.d() }),
+ },
+ },
+ true => switch (src_ra.isVector()) {
+ false => switch (vi.size(isel)) {
+ else => unreachable,
+ 2 => .fmov(dst_ra.h(), .{ .register = src_ra.w() }),
+ 4 => .fmov(dst_ra.s(), .{ .register = src_ra.w() }),
+ 8 => .fmov(dst_ra.d(), .{ .register = src_ra.x() }),
+ },
+ true => switch (vi.size(isel)) {
+ else => unreachable,
+ 2 => .fmov(dst_ra.h(), .{ .register = src_ra.h() }),
+ 4 => .fmov(dst_ra.s(), .{ .register = src_ra.s() }),
+ 8 => .fmov(dst_ra.d(), .{ .register = src_ra.d() }),
+ 16 => .orr(dst_ra.@"16b"(), src_ra.@"16b"(), .{ .register = src_ra.@"16b"() }),
+ },
+ },
+ });
+ assert(dst_live_vi.* == .allocating);
+ dst_live_vi.* = switch (expected_live_registers.get(dst_ra)) {
+ _ => .allocating,
+ .allocating => .allocating,
+ .free => .free,
+ };
+ } else if (try isel.fill(src_ra)) {
+ assert(src_live_vi.* == .free);
+ src_live_vi.* = .allocating;
+ }
+ assert(src_live_vi.* == .allocating);
+ vi.get(isel).location_payload.small.register = src_ra;
+ }
+
+ /// `liveIn` plus definition bookkeeping: when the value's root ancestor
+ /// lives in a stack slot, the store of `src_ra` into the slot is also
+ /// emitted, and finally `spillReg` distributes the register into any
+ /// sub-parts that expect registers of their own.
+ pub fn defLiveIn(
+ vi: Value.Index,
+ isel: *Select,
+ src_ra: Register.Alias,
+ expected_live_registers: *const LiveRegisters,
+ ) !void {
+ try vi.liveIn(isel, src_ra, expected_live_registers);
+ const offset_from_parent: i65, const parent_vi = vi.valueParent(isel);
+ switch (parent_vi.parent(isel)) {
+ .unallocated => {},
+ .stack_slot => |stack_slot| {
+ const offset = stack_slot.offset + offset_from_parent;
+ // Store width and register file follow the value's size and kind.
+ try isel.emit(switch (vi.size(isel)) {
+ else => unreachable,
+ 1 => if (src_ra.isVector()) .str(src_ra.b(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }) else .strb(src_ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ 2 => if (src_ra.isVector()) .str(src_ra.h(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }) else .strh(src_ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ 4 => .str(if (src_ra.isVector()) src_ra.s() else src_ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ 8 => .str(if (src_ra.isVector()) src_ra.d() else src_ra.x(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ 16 => .str(src_ra.q(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ });
+ },
+ else => unreachable,
+ }
+ try vi.spillReg(isel, src_ra, 0, expected_live_registers);
+ }
+
+ /// Extracts the parts of `vi` out of the wide source register `src_ra` into
+ /// each part's own destination register (recursing through sub-parts), then
+ /// releases those destination registers per `expected_live_registers`.
+ /// `start_offset` is the byte offset of `vi` within the value held in `src_ra`.
+ fn spillReg(
+ vi: Value.Index,
+ isel: *Select,
+ src_ra: Register.Alias,
+ start_offset: u64,
+ expected_live_registers: *const LiveRegisters,
+ ) !void {
+ assert(isel.live_registers.get(src_ra) == .allocating);
+ var part_it = vi.parts(isel);
+ if (part_it.only()) |part_vi| {
+ const dst_ra = part_vi.register(isel) orelse return;
+ if (dst_ra == src_ra) return;
+ const part_size = part_vi.size(isel);
+ // A vector destination cannot be the target of the bitfield extract
+ // below, so bounce through a scratch integer register in that case.
+ const part_ra = if (part_vi.isVector(isel)) try isel.allocIntReg() else dst_ra;
+ defer if (part_ra != dst_ra) isel.freeReg(part_ra);
+ // NOTE(review): this fmov is emitted before the sbfm/ubfm that
+ // produces `part_ra` — consistent with instructions being emitted in
+ // reverse program order elsewhere in this file (see Materialize);
+ // confirm against Select.emit.
+ if (part_ra != dst_ra) try isel.emit(switch (part_size) {
+ else => unreachable,
+ 2 => .fmov(dst_ra.h(), .{ .register = part_ra.w() }),
+ 4 => .fmov(dst_ra.s(), .{ .register = part_ra.w() }),
+ 8 => .fmov(dst_ra.d(), .{ .register = part_ra.x() }),
+ });
+ // Sign- or zero-extend the part's bytes [start_offset, end_offset)
+ // out of the source register via SBFM/UBFM.
+ try isel.emit(switch (start_offset + part_size) {
+ else => unreachable,
+ 1...4 => |end_offset| switch (part_vi.signedness(isel)) {
+ .signed => .sbfm(part_ra.w(), src_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(8 * start_offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ .unsigned => .ubfm(part_ra.w(), src_ra.w(), .{
+ .N = .word,
+ .immr = @intCast(8 * start_offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ },
+ 5...8 => |end_offset| switch (part_vi.signedness(isel)) {
+ .signed => .sbfm(part_ra.x(), src_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(8 * start_offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ .unsigned => .ubfm(part_ra.x(), src_ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(8 * start_offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ },
+ });
+ // Detach the part from its destination register and mark that
+ // register free (or still allocating, as the caller expects).
+ const value_ra = &part_vi.get(isel).location_payload.small.register;
+ assert(value_ra.* == dst_ra);
+ value_ra.* = .zr;
+ const dst_live_vi = isel.live_registers.getPtr(dst_ra);
+ assert(dst_live_vi.* == part_vi);
+ dst_live_vi.* = switch (expected_live_registers.get(dst_ra)) {
+ _ => .allocating,
+ .allocating => unreachable,
+ .free => .free,
+ };
+ } else while (part_it.next()) |part_vi| try part_vi.spillReg(
+ isel,
+ src_ra,
+ start_offset + part_vi.get(isel).offset_from_parent,
+ expected_live_registers,
+ );
+ }
+
+ /// Materializes `vi` into register `ra`, which must currently be free;
+ /// fills whatever previously occupied `ra` first.
+ fn liveOut(vi: Value.Index, isel: *Select, ra: Register.Alias) !void {
+ const was_filled = try isel.fill(ra);
+ assert(was_filled);
+ const entry = isel.live_registers.getPtr(ra);
+ assert(entry.* == .free);
+ entry.* = .allocating;
+ const mat: Value.Materialize = .{ .vi = vi, .ra = ra };
+ try mat.finish(isel);
+ }
+
+ /// Reserves a new properly-aligned stack slot big enough for `vi`,
+ /// growing `isel.stack_size`, and returns its sp-relative location.
+ fn allocStackSlot(vi: Value.Index, isel: *Select) Value.Indirect {
+ const slot_offset = vi.alignment(isel).forward(isel.stack_size);
+ isel.stack_size = @intCast(slot_offset + vi.size(isel));
+ tracking_log.debug("${d} -> [sp, #0x{x}]", .{ @intFromEnum(vi), @abs(slot_offset) });
+ return .{ .base = .sp, .offset = @intCast(slot_offset) };
+ }
+
+ /// Computes the address of `initial_vi` (plus `initial_offset` bytes) into
+ /// `ptr_ra`, allocating a stack slot for the value first if it has no
+ /// storage yet. Walks up `.value` parents, accumulating byte offsets.
+ fn address(initial_vi: Value.Index, isel: *Select, initial_offset: u64, ptr_ra: Register.Alias) !void {
+ var vi = initial_vi;
+ var offset: i65 = vi.get(isel).offset_from_parent + initial_offset;
+ parent: switch (vi.parent(isel)) {
+ .unallocated => {
+ // No storage yet: give the value a stack slot, then address it.
+ const stack_slot = vi.allocStackSlot(isel);
+ vi.setParent(isel, .{ .stack_slot = stack_slot });
+ continue :parent .{ .stack_slot = stack_slot };
+ },
+ .stack_slot => |stack_slot| {
+ offset += stack_slot.offset;
+ // Split the (possibly negative) offset into two 12-bit halves so it
+ // can be applied with at most two add/sub shifted-immediate
+ // instructions. NOTE(review): the hi12 add/sub is emitted before
+ // the lo12 one — consistent with reverse-order emission used
+ // elsewhere in this file; confirm against Select.emit.
+ const lo12: u12 = @truncate(@abs(offset) >> 0);
+ const hi12: u12 = @intCast(@abs(offset) >> 12);
+ if (hi12 > 0) try isel.emit(if (offset >= 0) .add(
+ ptr_ra.x(),
+ if (lo12 > 0) ptr_ra.x() else stack_slot.base.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ) else .sub(
+ ptr_ra.x(),
+ if (lo12 > 0) ptr_ra.x() else stack_slot.base.x(),
+ .{ .shifted_immediate = .{ .immediate = hi12, .lsl = .@"12" } },
+ ));
+ // Always emit at least one instruction so `ptr_ra` is defined even
+ // when the total offset is zero.
+ if (lo12 > 0 or hi12 == 0) try isel.emit(if (offset >= 0) .add(
+ ptr_ra.x(),
+ stack_slot.base.x(),
+ .{ .immediate = lo12 },
+ ) else .sub(
+ ptr_ra.x(),
+ stack_slot.base.x(),
+ .{ .immediate = lo12 },
+ ));
+ },
+ .address => |address_vi| try address_vi.liveOut(isel, ptr_ra),
+ .value => |parent_vi| {
+ // Climb to the parent value, accumulating this part's offset.
+ vi = parent_vi;
+ offset += vi.get(isel).offset_from_parent;
+ continue :parent vi.parent(isel);
+ },
+ .constant => |constant| {
+ const pt = isel.pt;
+ const zcu = pt.zcu;
+ // Compile-time strategy toggle: pc-relative `adr` with a single
+ // relocation vs. the `adrp`+`add` page/offset pair (currently the
+ // adrp+add form is used; both instructions get a relocation).
+ switch (true) {
+ false => {
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = .{
+ .val = constant.toIntern(),
+ .orig_ty = (try pt.singleConstPtrType(constant.typeOf(zcu))).toIntern(),
+ },
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = @intCast(offset),
+ },
+ });
+ try isel.emit(.adr(ptr_ra.x(), 0));
+ },
+ true => {
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = .{
+ .val = constant.toIntern(),
+ .orig_ty = (try pt.singleConstPtrType(constant.typeOf(zcu))).toIntern(),
+ },
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = @intCast(offset),
+ },
+ });
+ try isel.emit(.add(ptr_ra.x(), ptr_ra.x(), .{ .immediate = 0 }));
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = .{
+ .val = constant.toIntern(),
+ .orig_ty = (try pt.singleConstPtrType(constant.typeOf(zcu))).toIntern(),
+ },
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = @intCast(offset),
+ },
+ });
+ try isel.emit(.adrp(ptr_ra.x(), 0));
+ },
+ }
+ },
+ }
+ }
+ };
+
+ /// Iterates over the consecutively-numbered part values of a value.
+ pub const PartIterator = struct {
+ vi: Value.Index,
+ remaining: Value.PartsLen,
+
+ /// An iterator that yields exactly `vi` itself.
+ fn initOne(vi: Value.Index) PartIterator {
+ return .{ .vi = vi, .remaining = 1 };
+ }
+
+ /// Returns the next part index, or null when exhausted.
+ pub fn next(it: *PartIterator) ?Value.Index {
+ if (it.remaining == 0) return null;
+ it.remaining -= 1;
+ const current = it.vi;
+ it.vi = @enumFromInt(@intFromEnum(current) + 1);
+ return current;
+ }
+
+ /// Returns the sole part when exactly one remains, else null.
+ pub fn only(it: PartIterator) ?Value.Index {
+ return if (it.remaining == 1) it.vi else null;
+ }
+ };
+
+ /// Iterates over the parts of `vi` that cover the byte range
+ /// [field_offset, field_offset + field_size) of a value of type `ty`,
+ /// lazily splitting aggregate values into register-sized parts as needed.
+ const FieldPartIterator = struct {
+ vi: Value.Index,
+ ty: ZigType,
+ // Byte range of the field being iterated, relative to `vi`.
+ field_offset: u64,
+ field_size: u64,
+ // Progress within the field; advanced by each call to `next`.
+ next_offset: u64,
+
+ /// Returns the next part (and its offset within the current step of the
+ /// field), or null when the whole field has been covered. May allocate
+ /// new part values; fails on types the splitter does not support yet.
+ fn next(it: *FieldPartIterator, isel: *Select) !?struct { offset: u64, vi: Value.Index } {
+ const next_offset = it.next_offset;
+ const next_part_size = it.field_size - next_offset;
+ if (next_part_size == 0) return null;
+ var next_part_offset = it.field_offset + next_offset;
+
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ var vi = it.vi;
+ var ty = it.ty;
+ var ty_size = vi.size(isel);
+ assert(ty_size == ty.abiSize(zcu));
+ var offset: u64 = 0;
+ var size = ty_size;
+ assert(next_part_offset + next_part_size <= size);
+ // Descend until the current value exactly covers the requested bytes.
+ // Each iteration either steps into an existing part, or splits the
+ // current value into parts according to its type's layout.
+ while (next_part_offset > 0 or next_part_size < size) {
+ const part_vi = vi.partAtOffset(isel, next_part_offset);
+ if (part_vi != vi) {
+ // An existing part covers this offset: descend into it.
+ vi = part_vi;
+ const part_offset, size = part_vi.position(isel);
+ assert(part_offset <= next_part_offset and part_offset + size > next_part_offset);
+ offset += part_offset;
+ next_part_offset -= part_offset;
+ continue;
+ }
+ // No parts yet: split `vi` based on its type. Reserve capacity up
+ // front so addPart cannot fail mid-split.
+ try isel.values.ensureUnusedCapacity(zcu.gpa, Value.max_parts);
+ type_key: switch (ip.indexToKey(ty.toIntern())) {
+ else => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ // Big integers split into 8-byte limbs.
+ .int_type => |int_type| switch (int_type.bits) {
+ 0 => unreachable,
+ 1...64 => unreachable,
+ 65...256 => |bits| if (offset == 0 and size == ty_size) {
+ const parts_len = std.math.divCeil(u16, bits, 64) catch unreachable,
+ vi.setParts(isel, @intCast(parts_len));
+ for (0..parts_len) |part_index| _ = vi.addPart(isel, 8 * part_index, 8);
+ },
+ else => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ },
+ // Slices split into pointer and length words.
+ .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+ .one, .many, .c => unreachable,
+ .slice => if (offset == 0 and size == ty_size) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, 8);
+ _ = vi.addPart(isel, 8, 8);
+ } else unreachable,
+ },
+ // Optionals split into payload and a one-byte "has value" flag
+ // (unless the payload's representation already encodes null).
+ .opt_type => |child_type| if (ty.optionalReprIsPayload(zcu))
+ continue :type_key ip.indexToKey(child_type)
+ else switch (ZigType.fromInterned(child_type).abiSize(zcu)) {
+ 0...8, 16 => |child_size| if (offset == 0 and size == ty_size) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, child_size);
+ _ = vi.addPart(isel, child_size, 1);
+ } else unreachable,
+ 9...15 => |child_size| if (offset == 0 and size == ty_size) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, 8);
+ _ = vi.addPart(isel, 8, ty_size - 8);
+ } else if (offset == 8 and size == ty_size - 8) {
+ vi.setParts(isel, 2);
+ _ = vi.addPart(isel, 0, child_size - 8);
+ _ = vi.addPart(isel, child_size - 8, 1);
+ } else unreachable,
+ else => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ },
+ // Arrays: group elements into parts no finer than the minimum
+ // stride, merging adjacent elements where alignment permits.
+ .array_type => |array_type| {
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ const array_len = array_type.lenIncludingSentinel();
+ if (array_len > Value.max_parts and
+ (std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const Part = struct { offset: u64, size: u64 };
+ var parts: [Value.max_parts]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ const elem_ty: ZigType = .fromInterned(array_type.child);
+ const elem_size = elem_ty.abiSize(zcu);
+ const elem_signedness = if (ty.isAbiInt(zcu)) elem_signedness: {
+ const elem_int_info = elem_ty.intInfo(zcu);
+ break :elem_signedness if (elem_int_info.bits <= 16) elem_int_info.signedness else null;
+ } else null;
+ const elem_is_vector = elem_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, elem_ty.toIntern()) != null;
+ var elem_end: u64 = 0;
+ for (0..@intCast(array_len)) |_| {
+ const elem_begin = elem_end;
+ if (elem_begin >= offset + size) break;
+ elem_end = elem_begin + elem_size;
+ if (elem_end <= offset) continue;
+ if (offset >= elem_begin and offset + size <= elem_begin + elem_size) {
+ // The requested bytes fit within one element: recurse
+ // into the element type instead of splitting here.
+ ty = elem_ty;
+ ty_size = elem_size;
+ offset -= elem_begin;
+ continue :type_key ip.indexToKey(elem_ty.toIntern());
+ }
+ if (parts_len > 0) combine: {
+ // Merge with the previous part when the combined span
+ // stays within stride and alignment limits.
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = elem_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ continue;
+ }
+ parts[parts_len] = .{ .offset = elem_begin, .size = elem_size };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (elem_signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ if (elem_is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ .anyframe_type => unreachable,
+ // Error unions: at most two fields (error set and payload), in
+ // whichever order the layout puts them.
+ .error_union_type => |error_union_type| {
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ if ((std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
+ const error_set_offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const payload_offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const Part = struct { offset: u64, size: u64, signedness: ?std.builtin.Signedness, is_vector: bool };
+ var parts: [2]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ for (0..2) |field_index| {
+ // Visit error set and payload in ascending offset order.
+ const field_ty: ZigType, const field_begin = switch (@as(enum { error_set, payload }, switch (field_index) {
+ 0 => if (error_set_offset < payload_offset) .error_set else .payload,
+ 1 => if (error_set_offset < payload_offset) .payload else .error_set,
+ else => unreachable,
+ })) {
+ .error_set => .{ .fromInterned(error_union_type.error_set_type), error_set_offset },
+ .payload => .{ payload_ty, payload_offset },
+ };
+ if (field_begin >= offset + size) break;
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = field_ty;
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(field_ty.toIntern());
+ }
+ const field_signedness = if (field_ty.isAbiInt(zcu)) field_signedness: {
+ const field_int_info = field_ty.intInfo(zcu);
+ break :field_signedness if (field_int_info.bits <= 16) field_int_info.signedness else null;
+ } else null;
+ const field_is_vector = field_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, field_ty.toIntern()) != null;
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ prev_part.signedness = null;
+ prev_part.is_vector &= field_is_vector;
+ continue;
+ }
+ parts[parts_len] = .{
+ .offset = field_begin,
+ .size = field_size,
+ .signedness = field_signedness,
+ .is_vector = field_is_vector,
+ };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ if (part.is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ // Simple types reduce to an equivalent integer type (or fail for
+ // hardware floats, which are not split here).
+ .simple_type => |simple_type| switch (simple_type) {
+ .f16, .f32, .f64, .f128, .c_longdouble => return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)}),
+ .f80 => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 80 } },
+ .usize,
+ .isize,
+ .c_char,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ => continue :type_key .{ .int_type = ty.intInfo(zcu) },
+ .anyopaque,
+ .void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .noreturn,
+ .null,
+ .undefined,
+ .enum_literal,
+ .adhoc_inferred_error_set,
+ .generic_poison,
+ => unreachable,
+ .bool => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 1 } },
+ .anyerror => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = zcu.errorSetBits(),
+ } },
+ },
+ // Structs: same strategy as arrays, walking runtime fields in
+ // layout order and merging where alignment permits.
+ .struct_type => {
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ const loaded_struct = ip.loadStructType(ty.toIntern());
+ if (loaded_struct.field_types.len > Value.max_parts and
+ (std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const Part = struct { offset: u64, size: u64, signedness: ?std.builtin.Signedness, is_vector: bool };
+ var parts: [Value.max_parts]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ const field_begin = switch (loaded_struct.fieldAlign(ip, field_index)) {
+ .none => field_ty.abiAlignment(zcu),
+ else => |field_align| field_align,
+ }.forward(field_end);
+ if (field_begin >= offset + size) break;
+ const field_size = field_ty.abiSize(zcu);
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = field_ty;
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(field_ty.toIntern());
+ }
+ const field_signedness = if (field_ty.isAbiInt(zcu)) field_signedness: {
+ const field_int_info = field_ty.intInfo(zcu);
+ break :field_signedness if (field_int_info.bits <= 16) field_int_info.signedness else null;
+ } else null;
+ const field_is_vector = field_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, field_ty.toIntern()) != null;
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ prev_part.signedness = null;
+ prev_part.is_vector &= field_is_vector;
+ continue;
+ }
+ parts[parts_len] = .{
+ .offset = field_begin,
+ .size = field_size,
+ .signedness = field_signedness,
+ .is_vector = field_is_vector,
+ };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.signedness) |signedness| subpart_vi.setSignedness(isel, signedness);
+ if (part.is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ // Tuples: like structs, but comptime fields are skipped and no
+ // signedness is tracked.
+ .tuple_type => |tuple_type| {
+ const min_part_log2_stride: u5 = if (size > 16) 4 else if (size > 8) 3 else 0;
+ if (tuple_type.types.len > Value.max_parts and
+ (std.math.divCeil(u64, size, @as(u64, 1) << min_part_log2_stride) catch unreachable) > Value.max_parts)
+ return isel.fail("Value.FieldPartIterator.next({f})", .{isel.fmtType(ty)});
+ const alignment = vi.alignment(isel);
+ const Part = struct { offset: u64, size: u64, is_vector: bool };
+ var parts: [Value.max_parts]Part = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var field_end: u64 = 0;
+ for (tuple_type.types.get(ip), tuple_type.values.get(ip)) |field_type, field_value| {
+ if (field_value != .none) continue;
+ const field_ty: ZigType = .fromInterned(field_type);
+ const field_begin = field_ty.abiAlignment(zcu).forward(field_end);
+ if (field_begin >= offset + size) break;
+ const field_size = field_ty.abiSize(zcu);
+ if (field_size == 0) continue;
+ field_end = field_begin + field_size;
+ if (field_end <= offset) continue;
+ if (offset >= field_begin and offset + size <= field_begin + field_size) {
+ ty = field_ty;
+ ty_size = field_size;
+ offset -= field_begin;
+ continue :type_key ip.indexToKey(field_ty.toIntern());
+ }
+ const field_is_vector = field_size <= 16 and
+ CallAbiIterator.homogeneousAggregateBaseType(zcu, field_ty.toIntern()) != null;
+ if (parts_len > 0) combine: {
+ const prev_part = &parts[parts_len - 1];
+ const combined_size = field_end - prev_part.offset;
+ if (combined_size > @as(u64, 1) << @min(
+ min_part_log2_stride,
+ alignment.toLog2Units(),
+ @ctz(prev_part.offset),
+ )) break :combine;
+ prev_part.size = combined_size;
+ prev_part.is_vector &= field_is_vector;
+ continue;
+ }
+ parts[parts_len] = .{ .offset = field_begin, .size = field_size, .is_vector = field_is_vector };
+ parts_len += 1;
+ }
+ vi.setParts(isel, parts_len);
+ for (parts[0..parts_len]) |part| {
+ const subpart_vi = vi.addPart(isel, part.offset - offset, part.size);
+ if (part.is_vector) subpart_vi.setIsVector(isel);
+ }
+ },
+ .opaque_type, .func_type => continue :type_key .{ .simple_type = .anyopaque },
+ .enum_type => continue :type_key ip.indexToKey(ip.loadEnumType(ty.toIntern()).tag_ty),
+ .error_set_type,
+ .inferred_error_set_type,
+ => continue :type_key .{ .simple_type = .anyerror },
+ .undef,
+ .simple_value,
+ .variable,
+ .@"extern",
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .slice,
+ .opt,
+ .aggregate,
+ .un,
+ .memoized_call,
+ => unreachable, // values, not types
+ }
+ }
+ // Advance past the bytes covered by the part we are returning.
+ it.next_offset = next_offset + size;
+ return .{ .offset = next_part_offset - next_offset, .vi = vi };
+ }
+
+ /// Returns the single part covering the whole field, or null when the
+ /// field spans multiple parts. Asserts the field is non-empty.
+ fn only(it: *FieldPartIterator, isel: *Select) !?Value.Index {
+ const part = try it.next(isel);
+ assert(part.?.offset == 0);
+ return if (try it.next(isel)) |_| null else part.?.vi;
+ }
+ };
+
+ const Materialize = struct {
+ vi: Value.Index,
+ ra: Register.Alias,
+
+ fn finish(mat: Value.Materialize, isel: *Select) error{ OutOfMemory, CodegenFail }!void {
+ const live_vi = isel.live_registers.getPtr(mat.ra);
+ assert(live_vi.* == .allocating);
+ var vi = mat.vi;
+ var offset: i65 = 0;
+ const size = mat.vi.size(isel);
+ free: while (true) {
+ if (vi.register(isel)) |ra| {
+ if (ra != mat.ra) break :free try isel.emit(if (vi == mat.vi) if (mat.ra.isVector()) switch (size) {
+ else => unreachable,
+ 2 => .fmov(mat.ra.h(), .{ .register = ra.h() }),
+ 4 => .fmov(mat.ra.s(), .{ .register = ra.s() }),
+ 8 => .fmov(mat.ra.d(), .{ .register = ra.d() }),
+ 16 => .orr(mat.ra.@"16b"(), ra.@"16b"(), .{ .register = ra.@"16b"() }),
+ } else switch (size) {
+ else => unreachable,
+ 1...4 => .orr(mat.ra.w(), .wzr, .{ .register = ra.w() }),
+ 5...8 => .orr(mat.ra.x(), .xzr, .{ .register = ra.x() }),
+ } else switch (offset + size) {
+ else => unreachable,
+ 1...4 => |end_offset| switch (mat.vi.signedness(isel)) {
+ .signed => .sbfm(mat.ra.w(), ra.w(), .{
+ .N = .word,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ .unsigned => .ubfm(mat.ra.w(), ra.w(), .{
+ .N = .word,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ },
+ 5...8 => |end_offset| switch (mat.vi.signedness(isel)) {
+ .signed => .sbfm(mat.ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ .unsigned => .ubfm(mat.ra.x(), ra.x(), .{
+ .N = .doubleword,
+ .immr = @intCast(8 * offset),
+ .imms = @intCast(8 * end_offset - 1),
+ }),
+ },
+ });
+ mat.vi.get(isel).location_payload.small.register = mat.ra;
+ live_vi.* = mat.vi;
+ return;
+ }
+ offset += vi.get(isel).offset_from_parent;
+ switch (vi.parent(isel)) {
+ .unallocated => {
+ mat.vi.get(isel).location_payload.small.register = mat.ra;
+ live_vi.* = mat.vi;
+ return;
+ },
+ .stack_slot => |stack_slot| {
+ offset += stack_slot.offset;
+ break :free try isel.emit(switch (size) {
+ else => unreachable,
+ 1 => if (mat.ra.isVector()) .ldr(mat.ra.b(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }) else switch (mat.vi.signedness(isel)) {
+ .signed => .ldrsb(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ .unsigned => .ldrb(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ },
+ 2 => if (mat.ra.isVector()) .ldr(mat.ra.h(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }) else switch (mat.vi.signedness(isel)) {
+ .signed => .ldrsh(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ .unsigned => .ldrh(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ },
+ 4 => .ldr(if (mat.ra.isVector()) mat.ra.s() else mat.ra.w(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ 8 => .ldr(if (mat.ra.isVector()) mat.ra.d() else mat.ra.x(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ 16 => .ldr(mat.ra.q(), .{ .unsigned_offset = .{
+ .base = stack_slot.base.x(),
+ .offset = @intCast(offset),
+ } }),
+ });
+ },
+ .address => |base_vi| {
+ const base_mat = try base_vi.matReg(isel);
+ try isel.emit(switch (size) {
+ else => unreachable,
+ 1 => if (mat.ra.isVector()) .ldr(mat.ra.b(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }) else switch (mat.vi.signedness(isel)) {
+ .signed => .ldrsb(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ .unsigned => .ldrb(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ },
+ 2 => if (mat.ra.isVector()) .ldr(mat.ra.h(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }) else switch (mat.vi.signedness(isel)) {
+ .signed => .ldrsh(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ .unsigned => .ldrh(mat.ra.w(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ },
+ 4 => .ldr(if (mat.ra.isVector()) mat.ra.s() else mat.ra.w(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ 8 => .ldr(if (mat.ra.isVector()) mat.ra.d() else mat.ra.x(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ 16 => .ldr(mat.ra.q(), .{ .unsigned_offset = .{
+ .base = base_mat.ra.x(),
+ .offset = @intCast(offset),
+ } }),
+ });
+ break :free try base_mat.finish(isel);
+ },
+ .value => |parent_vi| vi = parent_vi,
+ .constant => |initial_constant| {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+ var constant = initial_constant.toIntern();
+ var constant_key = ip.indexToKey(constant);
+ while (true) {
+ constant_key: switch (constant_key) {
+ .int_type,
+ .ptr_type,
+ .array_type,
+ .vector_type,
+ .opt_type,
+ .anyframe_type,
+ .error_union_type,
+ .simple_type,
+ .struct_type,
+ .tuple_type,
+ .union_type,
+ .opaque_type,
+ .enum_type,
+ .func_type,
+ .error_set_type,
+ .inferred_error_set_type,
+
+ .enum_literal,
+ .empty_enum_value,
+ .memoized_call,
+ => unreachable, // not a runtime value
+ .undef => break :free try isel.emit(if (mat.ra.isVector()) .movi(switch (size) {
+ else => unreachable,
+ 1...8 => mat.ra.@"8b"(),
+ 9...16 => mat.ra.@"16b"(),
+ }, 0xaa, .{ .lsl = 0 }) else switch (size) {
+ else => unreachable,
+ 1...4 => .orr(mat.ra.w(), .wzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ 5...8 => .orr(mat.ra.x(), .xzr, .{ .immediate = .{
+ .N = .word,
+ .immr = 0b000001,
+ .imms = 0b111100,
+ } }),
+ }),
+ .simple_value => |simple_value| switch (simple_value) {
+ .undefined, .void, .null, .empty_tuple, .@"unreachable" => unreachable,
+ .true => continue :constant_key .{ .int = .{
+ .ty = .bool_type,
+ .storage = .{ .u64 = 1 },
+ } },
+ .false => continue :constant_key .{ .int = .{
+ .ty = .bool_type,
+ .storage = .{ .u64 = 0 },
+ } },
+ },
+ .int => |int| break :free storage: switch (int.storage) {
+ .u64 => |imm| try isel.movImmediate(switch (size) {
+ else => unreachable,
+ 1...4 => mat.ra.w(),
+ 5...8 => mat.ra.x(),
+ }, @bitCast(std.math.shr(u64, imm, 8 * offset))),
+ .i64 => |imm| switch (size) {
+ else => unreachable,
+ 1...4 => try isel.movImmediate(mat.ra.w(), @as(u32, @bitCast(@as(i32, @truncate(std.math.shr(i64, imm, 8 * offset)))))),
+ 5...8 => try isel.movImmediate(mat.ra.x(), @bitCast(std.math.shr(i64, imm, 8 * offset))),
+ },
+ .big_int => |big_int| {
+ assert(size == 8);
+ var imm: u64 = 0;
+ const limb_bits = @bitSizeOf(std.math.big.Limb);
+ const limbs = @divExact(64, limb_bits);
+ var limb_index: usize = @intCast(@divExact(offset, @divExact(limb_bits, 8)) + limbs);
+ for (0..limbs) |_| {
+ limb_index -= 1;
+ if (limb_index >= big_int.limbs.len) continue;
+ if (limb_bits < 64) imm <<= limb_bits;
+ imm |= big_int.limbs[limb_index];
+ }
+ if (!big_int.positive) {
+ limb_index = @min(limb_index, big_int.limbs.len);
+ imm = while (limb_index > 0) {
+ limb_index -= 1;
+ if (big_int.limbs[limb_index] != 0) break ~imm;
+ } else -%imm;
+ }
+ try isel.movImmediate(mat.ra.x(), imm);
+ },
+ .lazy_align => |ty| continue :storage .{
+ .u64 = ZigType.fromInterned(ty).abiAlignment(zcu).toByteUnits().?,
+ },
+ .lazy_size => |ty| continue :storage .{
+ .u64 = ZigType.fromInterned(ty).abiSize(zcu),
+ },
+ },
+ .err => |err| continue :constant_key .{ .int = .{
+ .ty = err.ty,
+ .storage = .{ .u64 = ip.getErrorValueIfExists(err.name).? },
+ } },
+ .error_union => |error_union| {
+ const error_union_type = ip.indexToKey(error_union.ty).error_union_type;
+ const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
+ if (!ip.isNoReturn(error_union_type.error_set_type) and
+ offset == codegen.errUnionErrorOffset(payload_ty, zcu))
+ {
+ offset = 0;
+ continue :constant_key switch (error_union.val) {
+ .err_name => |err_name| .{ .err = .{
+ .ty = error_union_type.error_set_type,
+ .name = err_name,
+ } },
+ .payload => .{ .int = .{
+ .ty = error_union_type.error_set_type,
+ .storage = .{ .u64 = 0 },
+ } },
+ };
+ }
+ assert(payload_ty.hasRuntimeBitsIgnoreComptime(zcu));
+ offset -= @intCast(codegen.errUnionPayloadOffset(payload_ty, zcu));
+ switch (error_union.val) {
+ .err_name => continue :constant_key .{ .undef = error_union_type.payload_type },
+ .payload => |payload| {
+ constant = payload;
+ constant_key = ip.indexToKey(payload);
+ continue :constant_key constant_key;
+ },
+ }
+ },
+ .enum_tag => |enum_tag| continue :constant_key .{ .int = ip.indexToKey(enum_tag.int).int },
+ .float => |float| storage: switch (float.storage) {
+ .f16 => |imm| {
+ if (!mat.ra.isVector()) continue :constant_key .{ .int = .{
+ .ty = .u16_type,
+ .storage = .{ .u64 = @as(u16, @bitCast(imm)) },
+ } };
+ const feat_fp16 = isel.target.cpu.has(.aarch64, .fullfp16);
+ if (feat_fp16) {
+ const Repr = std.math.FloatRepr(f16);
+ const repr: Repr = @bitCast(imm);
+ if (repr.mantissa & std.math.maxInt(Repr.Mantissa) >> 5 == 0 and switch (repr.exponent) {
+ .denormal, .infinite => false,
+ else => std.math.cast(i3, repr.exponent.unbias() - 1) != null,
+ }) break :free try isel.emit(.fmov(mat.ra.h(), .{ .immediate = imm }));
+ }
+ const bits: u16 = @bitCast(imm);
+ if (bits == 0) break :free try isel.emit(.movi(mat.ra.d(), 0b00000000, .replicate));
+ if (bits & std.math.maxInt(u8) == 0) break :free try isel.emit(.movi(
+ mat.ra.@"4h"(),
+ @intCast(@shrExact(bits, 8)),
+ .{ .lsl = 8 },
+ ));
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ try isel.emit(.fmov(if (feat_fp16) mat.ra.h() else mat.ra.s(), .{ .register = temp_ra.w() }));
+ break :free try isel.movImmediate(temp_ra.w(), bits);
+ },
+ .f32 => |imm| {
+ if (!mat.ra.isVector()) continue :constant_key .{ .int = .{
+ .ty = .u32_type,
+ .storage = .{ .u64 = @as(u32, @bitCast(imm)) },
+ } };
+ const Repr = std.math.FloatRepr(f32);
+ const repr: Repr = @bitCast(imm);
+ if (repr.mantissa & std.math.maxInt(Repr.Mantissa) >> 5 == 0 and switch (repr.exponent) {
+ .denormal, .infinite => false,
+ else => std.math.cast(i3, repr.exponent.unbias() - 1) != null,
+ }) break :free try isel.emit(.fmov(mat.ra.s(), .{ .immediate = @floatCast(imm) }));
+ const bits: u32 = @bitCast(imm);
+ if (bits == 0) break :free try isel.emit(.movi(mat.ra.d(), 0b00000000, .replicate));
+ if (bits & std.math.maxInt(u24) == 0) break :free try isel.emit(.movi(
+ mat.ra.@"2s"(),
+ @intCast(@shrExact(bits, 24)),
+ .{ .lsl = 24 },
+ ));
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ try isel.emit(.fmov(mat.ra.s(), .{ .register = temp_ra.w() }));
+ break :free try isel.movImmediate(temp_ra.w(), bits);
+ },
+ .f64 => |imm| {
+ if (!mat.ra.isVector()) continue :constant_key .{ .int = .{
+ .ty = .u64_type,
+ .storage = .{ .u64 = @as(u64, @bitCast(imm)) },
+ } };
+ const Repr = std.math.FloatRepr(f64);
+ const repr: Repr = @bitCast(imm);
+ if (repr.mantissa & std.math.maxInt(Repr.Mantissa) >> 5 == 0 and switch (repr.exponent) {
+ .denormal, .infinite => false,
+ else => std.math.cast(i3, repr.exponent.unbias() - 1) != null,
+ }) break :free try isel.emit(.fmov(mat.ra.d(), .{ .immediate = @floatCast(imm) }));
+ const bits: u64 = @bitCast(imm);
+ if (bits == 0) break :free try isel.emit(.movi(mat.ra.d(), 0b00000000, .replicate));
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ try isel.emit(.fmov(mat.ra.d(), .{ .register = temp_ra.x() }));
+ break :free try isel.movImmediate(temp_ra.x(), bits);
+ },
+ .f80 => |imm| break :free try isel.movImmediate(
+ mat.ra.x(),
+ @truncate(std.math.shr(u80, @bitCast(imm), 8 * offset)),
+ ),
+ .f128 => |imm| switch (ZigType.fromInterned(float.ty).floatBits(isel.target)) {
+ else => unreachable,
+ 16 => continue :storage .{ .f16 = @floatCast(imm) },
+ 32 => continue :storage .{ .f32 = @floatCast(imm) },
+ 64 => continue :storage .{ .f64 = @floatCast(imm) },
+ 128 => {
+ const bits: u128 = @bitCast(imm);
+ const hi64: u64 = @intCast(bits >> 64);
+ const lo64: u64 = @truncate(bits >> 0);
+ const temp_ra = try isel.allocIntReg();
+ defer isel.freeReg(temp_ra);
+ switch (hi64) {
+ 0 => {},
+ else => {
+ try isel.emit(.fmov(mat.ra.@"d[]"(1), .{ .register = temp_ra.x() }));
+ try isel.movImmediate(temp_ra.x(), hi64);
+ },
+ }
+ break :free switch (lo64) {
+ 0 => try isel.emit(.movi(switch (hi64) {
+ else => mat.ra.d(),
+ 0 => mat.ra.@"2d"(),
+ }, 0b00000000, .replicate)),
+ else => {
+ try isel.emit(.fmov(mat.ra.d(), .{ .register = temp_ra.x() }));
+ try isel.movImmediate(temp_ra.x(), lo64);
+ },
+ };
+ },
+ },
+ },
+ .ptr => |ptr| {
+ assert(offset == 0 and size == 8);
+ break :free switch (ptr.base_addr) {
+ .nav => |nav| if (ZigType.fromInterned(ip.getNav(nav).typeOf(ip)).isFnOrHasRuntimeBits(zcu)) switch (true) {
+ false => {
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adr(mat.ra.x(), 0));
+ },
+ true => {
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.add(mat.ra.x(), mat.ra.x(), .{ .immediate = 0 }));
+ try isel.nav_relocs.append(zcu.gpa, .{
+ .nav = nav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adrp(mat.ra.x(), 0));
+ },
+ } else continue :constant_key .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = isel.pt.navAlignment(nav).forward(0xaaaaaaaaaaaaaaaa) },
+ } },
+ .uav => |uav| if (ZigType.fromInterned(ip.typeOf(uav.val)).isFnOrHasRuntimeBits(zcu)) switch (true) {
+ false => {
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = uav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adr(mat.ra.x(), 0));
+ },
+ true => {
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = uav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.add(mat.ra.x(), mat.ra.x(), .{ .immediate = 0 }));
+ try isel.uav_relocs.append(zcu.gpa, .{
+ .uav = uav,
+ .reloc = .{
+ .label = @intCast(isel.instructions.items.len),
+ .addend = ptr.byte_offset,
+ },
+ });
+ try isel.emit(.adrp(mat.ra.x(), 0));
+ },
+ } else continue :constant_key .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = ZigType.fromInterned(uav.orig_ty).ptrAlignment(zcu).forward(0xaaaaaaaaaaaaaaaa) },
+ } },
+ .int => continue :constant_key .{ .int = .{
+ .ty = .usize_type,
+ .storage = .{ .u64 = ptr.byte_offset },
+ } },
+ .eu_payload => |base| {
+ var base_ptr = ip.indexToKey(base).ptr;
+ const eu_ty = ip.indexToKey(base_ptr.ty).ptr_type.child;
+ const payload_ty = ip.indexToKey(eu_ty).error_union_type.payload_type;
+ base_ptr.byte_offset += codegen.errUnionPayloadOffset(.fromInterned(payload_ty), zcu);
+ continue :constant_key .{ .ptr = base_ptr };
+ },
+ .opt_payload => |base| continue :constant_key .{ .ptr = ip.indexToKey(base).ptr },
+ .field => |field| {
+ var base_ptr = ip.indexToKey(field.base).ptr;
+ const agg_ty: ZigType = .fromInterned(ip.indexToKey(base_ptr.ty).ptr_type.child);
+ base_ptr.byte_offset += agg_ty.structFieldOffset(@intCast(field.index), zcu);
+ continue :constant_key .{ .ptr = base_ptr };
+ },
+ .comptime_alloc, .comptime_field, .arr_elem => unreachable,
+ };
+ },
+ .slice => |slice| switch (offset) {
+ 0 => continue :constant_key .{ .ptr = ip.indexToKey(slice.ptr).ptr },
+ else => {
+ assert(offset == @divExact(isel.target.ptrBitWidth(), 8));
+ offset = 0;
+ continue :constant_key .{ .int = ip.indexToKey(slice.len).int };
+ },
+ },
+ .opt => |opt| {
+ const child_ty = ip.indexToKey(opt.ty).opt_type;
+ const child_size = ZigType.fromInterned(child_ty).abiSize(zcu);
+ if (offset == child_size and size == 1) {
+ offset = 0;
+ continue :constant_key .{ .simple_value = switch (opt.val) {
+ .none => .false,
+ else => .true,
+ } };
+ }
+ const opt_ty: ZigType = .fromInterned(opt.ty);
+ if (offset + size <= child_size) continue :constant_key switch (opt.val) {
+ .none => if (opt_ty.optionalReprIsPayload(zcu)) .{ .int = .{
+ .ty = opt.ty,
+ .storage = .{ .u64 = 0 },
+ } } else .{ .undef = child_ty },
+ else => |child| {
+ constant = child;
+ constant_key = ip.indexToKey(child);
+ continue :constant_key constant_key;
+ },
+ };
+ },
+ .aggregate => |aggregate| switch (ip.indexToKey(aggregate.ty)) {
+ else => unreachable,
+ .array_type => |array_type| {
+ const elem_size = ZigType.fromInterned(array_type.child).abiSize(zcu);
+ const elem_offset = @mod(offset, elem_size);
+ if (size <= elem_size - elem_offset) {
+ defer offset = elem_offset;
+ continue :constant_key switch (aggregate.storage) {
+ .bytes => |bytes| .{ .int = .{ .ty = .u8_type, .storage = .{
+ .u64 = bytes.toSlice(array_type.lenIncludingSentinel(), ip)[@intCast(@divFloor(offset, elem_size))],
+ } } },
+ .elems => |elems| {
+ constant = elems[@intCast(@divFloor(offset, elem_size))];
+ constant_key = ip.indexToKey(constant);
+ continue :constant_key constant_key;
+ },
+ .repeated_elem => |repeated_elem| {
+ constant = repeated_elem;
+ constant_key = ip.indexToKey(repeated_elem);
+ continue :constant_key constant_key;
+ },
+ };
+ }
+ },
+ .vector_type => {},
+ .struct_type => {
+ const loaded_struct = ip.loadStructType(aggregate.ty);
+ switch (loaded_struct.layout) {
+ .auto => {
+ var field_offset: u64 = 0;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (field_it.next()) |field_index| {
+ if (loaded_struct.fieldIsComptime(ip, field_index)) continue;
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ field_offset = field_ty.structFieldAlignment(
+ loaded_struct.fieldAlign(ip, field_index),
+ loaded_struct.layout,
+ zcu,
+ ).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (offset >= field_offset and offset + size <= field_offset + field_size) {
+ offset -= field_offset;
+ constant = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ };
+ constant_key = ip.indexToKey(constant);
+ continue :constant_key constant_key;
+ }
+ field_offset += field_size;
+ }
+ },
+ .@"extern", .@"packed" => {},
+ }
+ },
+ .tuple_type => |tuple_type| {
+ var field_offset: u64 = 0;
+ for (tuple_type.types.get(ip), tuple_type.values.get(ip), 0..) |field_type, field_value, field_index| {
+ if (field_value != .none) continue;
+ const field_ty: ZigType = .fromInterned(field_type);
+ field_offset = field_ty.abiAlignment(zcu).forward(field_offset);
+ const field_size = field_ty.abiSize(zcu);
+ if (offset >= field_offset and offset + size <= field_offset + field_size) {
+ offset -= field_offset;
+ constant = switch (aggregate.storage) {
+ .bytes => unreachable,
+ .elems => |elems| elems[field_index],
+ .repeated_elem => |repeated_elem| repeated_elem,
+ };
+ constant_key = ip.indexToKey(constant);
+ continue :constant_key constant_key;
+ }
+ field_offset += field_size;
+ }
+ },
+ },
+ else => {},
+ }
+ var buffer: [16]u8 = @splat(0);
+ if (ZigType.fromInterned(constant_key.typeOf()).abiSize(zcu) <= buffer.len and
+ try isel.writeToMemory(.fromInterned(constant), &buffer))
+ {
+ constant_key = if (mat.ra.isVector()) .{ .float = switch (size) {
+ else => unreachable,
+ 2 => .{ .ty = .f16_type, .storage = .{ .f16 = @bitCast(std.mem.readInt(
+ u16,
+ buffer[@intCast(offset)..][0..2],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ 4 => .{ .ty = .f32_type, .storage = .{ .f32 = @bitCast(std.mem.readInt(
+ u32,
+ buffer[@intCast(offset)..][0..4],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ 8 => .{ .ty = .f64_type, .storage = .{ .f64 = @bitCast(std.mem.readInt(
+ u64,
+ buffer[@intCast(offset)..][0..8],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ 16 => .{ .ty = .f128_type, .storage = .{ .f128 = @bitCast(std.mem.readInt(
+ u128,
+ buffer[@intCast(offset)..][0..16],
+ isel.target.cpu.arch.endian(),
+ )) } },
+ } } else .{ .int = .{
+ .ty = .u64_type,
+ .storage = .{ .u64 = switch (size) {
+ else => unreachable,
+ inline 1...8 => |ct_size| std.mem.readInt(
+ @Type(.{ .int = .{ .signedness = .unsigned, .bits = 8 * ct_size } }),
+ buffer[@intCast(offset)..][0..ct_size],
+ isel.target.cpu.arch.endian(),
+ ),
+ } },
+ } };
+ offset = 0;
+ continue;
+ }
+ return isel.fail("unsupported value <{f}, {f}>", .{
+ isel.fmtType(.fromInterned(constant_key.typeOf())),
+ isel.fmtConstant(.fromInterned(constant)),
+ });
+ }
+ },
+ }
+ }
+ live_vi.* = .free;
+ }
+ };
+};
+/// Creates a fresh, unallocated value tracking slot sized and aligned like `ty`.
+fn initValue(isel: *Select, ty: ZigType) Value.Index {
+    const mod = isel.pt.zcu;
+    const alignment = ty.abiAlignment(mod);
+    const byte_size = ty.abiSize(mod);
+    return isel.initValueAdvanced(alignment, 0, byte_size);
+}
+/// Appends a new value to `isel.values` (capacity must already be reserved)
+/// and returns its index. The value starts unreferenced, unallocated, and as
+/// a single part; values larger than 16 bytes get a `.large` location.
+fn initValueAdvanced(
+    isel: *Select,
+    parent_alignment: InternPool.Alignment,
+    offset_from_parent: u64,
+    size: u64,
+) Value.Index {
+    // The new element lands at the current length, which is therefore its index.
+    const new_vi: Value.Index = @enumFromInt(isel.values.items.len);
+    const is_large = size > 16;
+    isel.values.addOneAssumeCapacity().* = .{
+        .refs = 0,
+        .flags = .{
+            // The value can only be as aligned as its offset within the parent allows.
+            .alignment = .fromLog2Units(@min(parent_alignment.toLog2Units(), @ctz(offset_from_parent))),
+            .parent_tag = .unallocated,
+            .location_tag = if (is_large) .large else .small,
+            .parts_len_minus_one = 0,
+        },
+        .offset_from_parent = offset_from_parent,
+        .parent_payload = .{ .unallocated = {} },
+        .location_payload = if (is_large)
+            .{ .large = .{ .size = size } }
+        else
+            .{ .small = .{
+                .size = @intCast(size),
+                .signedness = .unsigned,
+                .is_vector = false,
+                .hint = .zr,
+                .register = .zr,
+            } },
+        .parts = undefined,
+    };
+    return new_vi;
+}
+/// Debug aid: prints the current value-tracking state to stderr.
+/// `which` selects between dumping only values whose reference count is
+/// non-zero (`.only_referenced`) or every value (`.all`).
+pub fn dumpValues(isel: *Select, which: enum { only_referenced, all }) void {
+    // Best-effort debugging output; any write error aborts the process.
+    errdefer |err| @panic(@errorName(err));
+    const stderr = std.debug.lockStderrWriter(&.{});
+    defer std.debug.unlockStderrWriter();
+
+    const zcu = isel.pt.zcu;
+    const gpa = zcu.gpa;
+    const ip = &zcu.intern_pool;
+    const nav = ip.getNav(isel.nav_index);
+
+    // Invert `live_values` (AIR inst -> value) so each value can list the AIR
+    // instructions currently mapped to it.
+    var reverse_live_values: std.AutoArrayHashMapUnmanaged(Value.Index, std.ArrayListUnmanaged(Air.Inst.Index)) = .empty;
+    defer {
+        for (reverse_live_values.values()) |*list| list.deinit(gpa);
+        reverse_live_values.deinit(gpa);
+    }
+    {
+        try reverse_live_values.ensureTotalCapacity(gpa, isel.live_values.count());
+        var live_val_it = isel.live_values.iterator();
+        while (live_val_it.next()) |live_val_entry| switch (live_val_entry.value_ptr.*) {
+            _ => {
+                const gop = reverse_live_values.getOrPutAssumeCapacity(live_val_entry.value_ptr.*);
+                if (!gop.found_existing) gop.value_ptr.* = .empty;
+                try gop.value_ptr.append(gpa, live_val_entry.key_ptr.*);
+            },
+            .allocating, .free => unreachable,
+        };
+    }
+
+    // Invert `live_registers` (register -> value) so each value can report
+    // which register currently holds it.
+    var reverse_live_registers: std.AutoHashMapUnmanaged(Value.Index, Register.Alias) = .empty;
+    defer reverse_live_registers.deinit(gpa);
+    {
+        try reverse_live_registers.ensureTotalCapacity(gpa, @typeInfo(Register.Alias).@"enum".fields.len);
+        var live_reg_it = isel.live_registers.iterator();
+        while (live_reg_it.next()) |live_reg_entry| switch (live_reg_entry.value.*) {
+            _ => reverse_live_registers.putAssumeCapacityNoClobber(live_reg_entry.value.*, live_reg_entry.key),
+            .allocating, .free => {},
+        };
+    }
+
+    // Collect the root of each value's parent chain; the map value is the
+    // printing depth (0 for roots, +1 per nesting level of parts).
+    var roots: std.AutoArrayHashMapUnmanaged(Value.Index, u32) = .empty;
+    defer roots.deinit(gpa);
+    {
+        try roots.ensureTotalCapacity(gpa, isel.values.items.len);
+        var vi: Value.Index = @enumFromInt(isel.values.items.len);
+        while (@intFromEnum(vi) > 0) {
+            vi = @enumFromInt(@intFromEnum(vi) - 1);
+            if (which == .only_referenced and vi.get(isel).refs == 0) continue;
+            // Walk up through `.value` parents to the chain's root.
+            while (true) switch (vi.parent(isel)) {
+                .unallocated, .stack_slot, .constant => break,
+                .value => |parent_vi| vi = parent_vi,
+                .address => |address_vi| break roots.putAssumeCapacity(address_vi, 0),
+            };
+            roots.putAssumeCapacity(vi, 0);
+        }
+    }
+
+    try stderr.print("# Begin {s} Value Dump: {f}:\n", .{ @typeName(Select), nav.fqn.fmt(ip) });
+    while (roots.pop()) |root_entry| {
+        const vi = root_entry.key;
+        const value = vi.get(isel);
+        try stderr.splatByteAll(' ', 2 * (@as(usize, 1) + root_entry.value));
+        try stderr.print("${d}", .{@intFromEnum(vi)});
+        {
+            // List the AIR instructions and/or register mapped to this value.
+            var first = true;
+            if (reverse_live_values.get(vi)) |aiis| for (aiis.items) |aii| {
+                if (aii == Block.main) {
+                    try stderr.print("{s}%main", .{if (first) " <- " else ", "});
+                } else {
+                    try stderr.print("{s}%{d}", .{ if (first) " <- " else ", ", @intFromEnum(aii) });
+                }
+                first = false;
+            };
+            if (reverse_live_registers.get(vi)) |ra| {
+                try stderr.print("{s}{s}", .{ if (first) " <- " else ", ", @tagName(ra) });
+                first = false;
+            }
+        }
+        try stderr.writeByte(':');
+        // Describe where the value lives relative to its parent.
+        switch (value.flags.parent_tag) {
+            .unallocated => if (value.offset_from_parent != 0) try stderr.print(" +0x{x}", .{value.offset_from_parent}),
+            .stack_slot => {
+                try stderr.print(" [{s}, #{s}0x{x}", .{
+                    @tagName(value.parent_payload.stack_slot.base),
+                    if (value.parent_payload.stack_slot.offset < 0) "-" else "",
+                    @abs(value.parent_payload.stack_slot.offset),
+                });
+                if (value.offset_from_parent != 0) try stderr.print("+0x{x}", .{value.offset_from_parent});
+                try stderr.writeByte(']');
+            },
+            .value => try stderr.print(" ${d}+0x{x}", .{ @intFromEnum(value.parent_payload.value), value.offset_from_parent }),
+            .address => try stderr.print(" ${d}[0x{x}]", .{ @intFromEnum(value.parent_payload.address), value.offset_from_parent }),
+            .constant => try stderr.print(" <{f}, {f}>", .{
+                isel.fmtType(value.parent_payload.constant.typeOf(zcu)),
+                isel.fmtConstant(value.parent_payload.constant),
+            }),
+        }
+        try stderr.print(" align({s})", .{@tagName(value.flags.alignment)});
+        switch (value.flags.location_tag) {
+            .large => try stderr.print(" size=0x{x} large", .{value.location_payload.large.size}),
+            .small => {
+                const loc = value.location_payload.small;
+                try stderr.print(" size=0x{x}", .{loc.size});
+                switch (loc.signedness) {
+                    .unsigned => {},
+                    .signed => try stderr.writeAll(" signed"),
+                }
+                if (loc.hint != .zr) try stderr.print(" hint={s}", .{@tagName(loc.hint)});
+                if (loc.register != .zr) try stderr.print(" loc={s}", .{@tagName(loc.register)});
+            },
+        }
+        try stderr.print(" refs={d}\n", .{value.refs});
+
+        // Queue sub-parts for printing one level deeper. The countdown pushes
+        // part 0 last so it pops first; a single-part value pushes nothing.
+        var part_index = value.flags.parts_len_minus_one;
+        if (part_index > 0) while (true) : (part_index -= 1) {
+            roots.putAssumeCapacityNoClobber(
+                @enumFromInt(@intFromEnum(value.parts) + part_index),
+                root_entry.value + 1,
+            );
+            if (part_index == 0) break;
+        };
+    }
+    try stderr.print("# End {s} Value Dump: {f}\n\n", .{ @typeName(Select), nav.fqn.fmt(ip) });
+}
+
+/// If every byte of `constant`'s in-memory representation is the same,
+/// returns that byte; otherwise returns null. Also returns null when the
+/// value is zero-sized, too large for `usize`, or cannot be serialized by
+/// `writeToMemory`.
+fn hasRepeatedByteRepr(isel: *Select, constant: Constant) error{OutOfMemory}!?u8 {
+    const zcu = isel.pt.zcu;
+    const ty = constant.typeOf(zcu);
+    const abi_size = std.math.cast(usize, ty.abiSize(zcu)) orelse return null;
+    // Guard zero-sized types: `byte_buffer[0]` below would be out of bounds.
+    if (abi_size == 0) return null;
+    const byte_buffer = try zcu.gpa.alloc(u8, abi_size);
+    defer zcu.gpa.free(byte_buffer);
+    return if (try isel.writeToMemory(constant, byte_buffer) and
+        std.mem.allEqual(u8, byte_buffer[1..], byte_buffer[0])) byte_buffer[0] else null;
+}
+
+/// Serializes `constant` into `buffer` using the target's memory layout.
+/// Returns false when the value cannot be represented as plain bytes by this
+/// routine (relocatable pointers, unimplemented or ill-defined layouts);
+/// the buffer contents are unspecified in that case.
+fn writeToMemory(isel: *Select, constant: Constant, buffer: []u8) error{OutOfMemory}!bool {
+    const zcu = isel.pt.zcu;
+    const ip = &zcu.intern_pool;
+    switch (ip.indexToKey(constant.toIntern())) {
+        .int_type,
+        .ptr_type,
+        .array_type,
+        .vector_type,
+        .opt_type,
+        .anyframe_type,
+        .error_union_type,
+        .simple_type,
+        .struct_type,
+        .tuple_type,
+        .union_type,
+        .opaque_type,
+        .enum_type,
+        .func_type,
+        .error_set_type,
+        .inferred_error_set_type,
+
+        .enum_literal,
+        .empty_enum_value,
+        .memoized_call,
+        => unreachable, // not a runtime value
+        // Optionals: payload bytes first, then a separate "has value" byte
+        // unless the representation *is* the payload (e.g. non-null pointers).
+        .opt => |opt| {
+            const child_size: usize = @intCast(ZigType.fromInterned(ip.indexToKey(opt.ty).opt_type).abiSize(zcu));
+            switch (opt.val) {
+                // NOTE(review): for a null optional with a separate flag byte,
+                // the payload bytes are left unwritten — callers appear to need
+                // to tolerate undefined payload bytes here; confirm.
+                .none => if (!ZigType.fromInterned(opt.ty).optionalReprIsPayload(zcu)) {
+                    buffer[child_size] = @intFromBool(false);
+                } else @memset(buffer[0..child_size], 0x00),
+                else => |child_constant| {
+                    if (!try isel.writeToMemory(.fromInterned(child_constant), buffer[0..child_size])) return false;
+                    if (!ZigType.fromInterned(opt.ty).optionalReprIsPayload(zcu)) buffer[child_size] = @intFromBool(true);
+                },
+            }
+            return true;
+        },
+        .aggregate => |aggregate| switch (ip.indexToKey(aggregate.ty)) {
+            else => unreachable,
+            // Arrays: elements (and sentinel) written back to back.
+            .array_type => |array_type| {
+                var elem_offset: usize = 0;
+                const elem_size: usize = @intCast(ZigType.fromInterned(array_type.child).abiSize(zcu));
+                const len_including_sentinel: usize = @intCast(array_type.lenIncludingSentinel());
+                switch (aggregate.storage) {
+                    .bytes => |bytes| @memcpy(buffer[0..len_including_sentinel], bytes.toSlice(len_including_sentinel, ip)),
+                    .elems => |elems| for (elems) |elem| {
+                        if (!try isel.writeToMemory(.fromInterned(elem), buffer[elem_offset..][0..elem_size])) return false;
+                        elem_offset += elem_size;
+                    },
+                    .repeated_elem => |repeated_elem| for (0..len_including_sentinel) |_| {
+                        if (!try isel.writeToMemory(.fromInterned(repeated_elem), buffer[elem_offset..][0..elem_size])) return false;
+                        elem_offset += elem_size;
+                    },
+                }
+                return true;
+            },
+            .vector_type => {},
+            // Auto-layout structs: walk fields in runtime order, laying each
+            // out at its computed offset. Other layouts fall through below.
+            .struct_type => {
+                const loaded_struct = ip.loadStructType(aggregate.ty);
+                switch (loaded_struct.layout) {
+                    .auto => {
+                        var field_offset: u64 = 0;
+                        var field_it = loaded_struct.iterateRuntimeOrder(ip);
+                        while (field_it.next()) |field_index| {
+                            if (loaded_struct.fieldIsComptime(ip, field_index)) continue;
+                            const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+                            field_offset = field_ty.structFieldAlignment(
+                                loaded_struct.fieldAlign(ip, field_index),
+                                loaded_struct.layout,
+                                zcu,
+                            ).forward(field_offset);
+                            const field_size = field_ty.abiSize(zcu);
+                            if (!try isel.writeToMemory(.fromInterned(switch (aggregate.storage) {
+                                .bytes => unreachable,
+                                .elems => |elems| elems[field_index],
+                                .repeated_elem => |repeated_elem| repeated_elem,
+                            }), buffer[@intCast(field_offset)..][0..@intCast(field_size)])) return false;
+                            field_offset += field_size;
+                        }
+                        return true;
+                    },
+                    .@"extern", .@"packed" => {},
+                }
+            },
+            // Tuples: like auto structs, but comptime fields are skipped via
+            // their resolved values.
+            .tuple_type => |tuple_type| {
+                var field_offset: u64 = 0;
+                for (tuple_type.types.get(ip), tuple_type.values.get(ip), 0..) |field_type, field_value, field_index| {
+                    if (field_value != .none) continue;
+                    const field_ty: ZigType = .fromInterned(field_type);
+                    field_offset = field_ty.abiAlignment(zcu).forward(field_offset);
+                    const field_size = field_ty.abiSize(zcu);
+                    if (!try isel.writeToMemory(.fromInterned(switch (aggregate.storage) {
+                        .bytes => unreachable,
+                        .elems => |elems| elems[field_index],
+                        .repeated_elem => |repeated_elem| repeated_elem,
+                    }), buffer[@intCast(field_offset)..][0..@intCast(field_size)])) return false;
+                    field_offset += field_size;
+                }
+                return true;
+            },
+        },
+        else => {},
+    }
+    // Everything not special-cased above is delegated to the generic
+    // implementation; its soft failures map to `false`.
+    constant.writeToMemory(isel.pt, buffer) catch |err| switch (err) {
+        error.OutOfMemory => return error.OutOfMemory,
+        error.ReinterpretDeclRef, error.Unimplemented, error.IllDefinedMemoryLayout => return false,
+    };
+    return true;
+}
+
+/// Outcome of a non-spilling register-allocation attempt.
+const TryAllocRegResult = union(enum) {
+    /// A previously free register was claimed and is now `.allocating`.
+    allocated: Register.Alias,
+    /// Nothing was free; this value-occupied register is the suggested one
+    /// to vacate (via `fillMemory`) and reuse.
+    fill_candidate: Register.Alias,
+    /// Every candidate register is itself mid-allocation; nothing available.
+    out_of_registers,
+};
+
+/// Scans r0 through lr for an integer register, skipping r18 (the platform
+/// register) and the frame pointer. A free register is claimed, recorded in
+/// `saved_registers`, and returned; otherwise the first value-occupied
+/// register is reported as a spill candidate.
+fn tryAllocIntReg(isel: *Select) TryAllocRegResult {
+    var fallback: TryAllocRegResult = .out_of_registers;
+    var ra: Register.Alias = .r0;
+    while (true) : (ra = @enumFromInt(@intFromEnum(ra) + 1)) {
+        const reserved = ra == .r18 or ra == Register.Alias.fp;
+        if (!reserved) {
+            const slot = isel.live_registers.getPtr(ra);
+            if (slot.* == .free) {
+                slot.* = .allocating;
+                isel.saved_registers.insert(ra);
+                return .{ .allocated = ra };
+            }
+            // Occupied by a tracked value: remember the first such register
+            // in case nothing turns out to be free.
+            if (slot.* != .allocating and fallback == .out_of_registers)
+                fallback = .{ .fill_candidate = ra };
+        }
+        if (ra == Register.Alias.lr) return fallback;
+    }
+}
+
+/// Allocates an integer register, spilling an occupied one to memory when
+/// none is free. Fails codegen if every register is busy.
+fn allocIntReg(isel: *Select) !Register.Alias {
+    const result = isel.tryAllocIntReg();
+    if (result == .out_of_registers) return isel.fail("ran out of registers", .{});
+    if (result == .allocated) return result.allocated;
+    // Spill path: vacate the candidate, then claim it.
+    const ra = result.fill_candidate;
+    assert(try isel.fillMemory(ra));
+    const live_vi = isel.live_registers.getPtr(ra);
+    assert(live_vi.* == .free);
+    live_vi.* = .allocating;
+    return ra;
+}
+
+/// Vector-register counterpart of `tryAllocIntReg`: scans v0 through v31,
+/// claiming a free register or reporting the first occupied one as a spill
+/// candidate.
+fn tryAllocVecReg(isel: *Select) TryAllocRegResult {
+    var fallback: TryAllocRegResult = .out_of_registers;
+    var ra: Register.Alias = .v0;
+    while (true) : (ra = @enumFromInt(@intFromEnum(ra) + 1)) {
+        const slot = isel.live_registers.getPtr(ra);
+        if (slot.* == .free) {
+            slot.* = .allocating;
+            isel.saved_registers.insert(ra);
+            return .{ .allocated = ra };
+        }
+        // Occupied by a tracked value: remember the first such register in
+        // case nothing turns out to be free.
+        if (slot.* != .allocating and fallback == .out_of_registers)
+            fallback = .{ .fill_candidate = ra };
+        if (ra == Register.Alias.v31) return fallback;
+    }
+}
+
+/// Allocates a vector register, spilling an occupied one to memory when none
+/// is free. Fails codegen if every register is busy.
+fn allocVecReg(isel: *Select) !Register.Alias {
+    switch (isel.tryAllocVecReg()) {
+        .allocated => |ra| return ra,
+        .fill_candidate => |ra| {
+            assert(try isel.fillMemory(ra));
+            // Claim the vacated register, mirroring `allocIntReg`: without
+            // this it stays `.free`, so a later `freeReg` (which asserts
+            // `.allocating`) would trip and a nested allocation could steal
+            // the register out from under the caller.
+            const live_vi = isel.live_registers.getPtr(ra);
+            assert(live_vi.* == .free);
+            live_vi.* = .allocating;
+            return ra;
+        },
+        .out_of_registers => return isel.fail("ran out of registers", .{}),
+    }
+}
+
+/// Holds a register in the `.allocating` state until `unlock` is called.
+const RegLock = struct {
+    ra: Register.Alias,
+
+    /// A lock that holds nothing; unlocking it is a no-op.
+    const empty: RegLock = .{ .ra = .zr };
+
+    /// Releases the held register, if any.
+    fn unlock(lock: RegLock, isel: *Select) void {
+        if (lock.ra != .zr) isel.freeReg(lock.ra);
+    }
+};
+/// Pins a currently free register so allocation cannot hand it out; release
+/// with `RegLock.unlock`.
+fn lockReg(isel: *Select, ra: Register.Alias) RegLock {
+    assert(ra != .zr);
+    const slot = isel.live_registers.getPtr(ra);
+    assert(slot.* == .free);
+    slot.* = .allocating;
+    return .{ .ra = ra };
+}
+/// Like `lockReg`, but tolerates the register already being mid-allocation,
+/// in which case an empty lock is returned. The register must not currently
+/// track a value.
+fn tryLockReg(isel: *Select, ra: Register.Alias) RegLock {
+    assert(ra != .zr);
+    const slot = isel.live_registers.getPtr(ra);
+    if (slot.* == .allocating) return .empty;
+    assert(slot.* == .free); // a tracked value here would be a caller bug
+    slot.* = .allocating;
+    return .{ .ra = ra };
+}
+
+/// Returns a register previously claimed via allocation or locking to the
+/// free pool.
+fn freeReg(isel: *Select, ra: Register.Alias) void {
+    assert(ra != .zr);
+    const slot = isel.live_registers.getPtr(ra);
+    assert(slot.* == .allocating);
+    slot.* = .free;
+}
+
+/// Obtains the `Value` tracking the given AIR reference, creating one if
+/// needed. An instruction result is registered in `live_values` (via
+/// `vi.ref`); an interned constant gets a fresh value parented to that
+/// constant.
+fn use(isel: *Select, air_ref: Air.Inst.Ref) !Value.Index {
+    const zcu = isel.pt.zcu;
+    const ip = &zcu.intern_pool;
+    // Reserve capacity up front so `initValue` below cannot fail.
+    try isel.values.ensureUnusedCapacity(zcu.gpa, 1);
+    const vi, const ty = if (air_ref.toIndex()) |air_inst_index| vi_ty: {
+        // Instruction result: reuse the existing tracking entry if present.
+        const live_gop = try isel.live_values.getOrPut(zcu.gpa, air_inst_index);
+        if (live_gop.found_existing) return live_gop.value_ptr.*;
+        const ty = isel.air.typeOf(air_ref, ip);
+        const vi = isel.initValue(ty);
+        tracking_log.debug("${d} <- %{d}", .{
+            @intFromEnum(vi),
+            @intFromEnum(air_inst_index),
+        });
+        live_gop.value_ptr.* = vi.ref(isel);
+        break :vi_ty .{ vi, ty };
+    } else vi_ty: {
+        // Interned constant: the new value's parent is the constant itself.
+        const constant: Constant = .fromInterned(air_ref.toInterned().?);
+        const ty = constant.typeOf(zcu);
+        const vi = isel.initValue(ty);
+        tracking_log.debug("${d} <- <{f}, {f}>", .{
+            @intFromEnum(vi),
+            isel.fmtType(ty),
+            isel.fmtConstant(constant),
+        });
+        vi.setParent(isel, .{ .constant = constant });
+        break :vi_ty .{ vi, ty };
+    };
+    if (ty.isAbiInt(zcu)) {
+        const int_info = ty.intInfo(zcu);
+        // Sub-register-width integers record signedness so later reloads can
+        // pick sign- vs zero-extending loads (see `fillMemory`).
+        if (int_info.bits <= 16) vi.setSignedness(isel, int_info.signedness);
+    } else if (vi.size(isel) <= 16 and
+        CallAbiIterator.homogeneousAggregateBaseType(zcu, ty.toIntern()) != null) vi.setIsVector(isel);
+    return vi;
+}
+
+/// Vacates `dst_ra` by re-homing the value currently tracked there into
+/// another register (preferring the value's hint) or, failing that, into
+/// memory via `fillMemory`. Returns false when `dst_ra` cannot be vacated
+/// (it is a reserved register, or is itself mid-allocation); returns true
+/// once `dst_ra` no longer tracks a value.
+fn fill(isel: *Select, dst_ra: Register.Alias) error{ OutOfMemory, CodegenFail }!bool {
+    // These registers never participate in value tracking.
+    switch (dst_ra) {
+        else => {},
+        Register.Alias.fp, .zr, .sp, .pc, .fpcr, .fpsr, .ffr => return false,
+    }
+    const dst_live_vi = isel.live_registers.getPtr(dst_ra);
+    const dst_vi = switch (dst_live_vi.*) {
+        _ => |dst_vi| dst_vi,
+        .allocating => return false, // mid-allocation; must not be disturbed
+        .free => return true, // already vacant
+    };
+    const src_ra = src_ra: {
+        // Prefer relocating the value into its hinted register.
+        if (dst_vi.hint(isel)) |hint_ra| {
+            assert(dst_live_vi.* == dst_vi);
+            // Temporarily mark `dst_ra` as mid-allocation so the recursive
+            // fill below cannot select it.
+            dst_live_vi.* = .allocating;
+            defer dst_live_vi.* = dst_vi;
+            if (try isel.fill(hint_ra)) {
+                isel.saved_registers.insert(hint_ra);
+                break :src_ra hint_ra;
+            }
+        }
+        switch (if (dst_vi.isVector(isel)) isel.tryAllocVecReg() else isel.tryAllocIntReg()) {
+            .allocated => |ra| break :src_ra ra,
+            // No spare register of the right class: move the value to memory.
+            .fill_candidate, .out_of_registers => return isel.fillMemory(dst_ra),
+        }
+    };
+    // Hand the value over to its new register and update both tracking maps.
+    try dst_vi.liveIn(isel, src_ra, comptime &.initFill(.free));
+    const src_live_vi = isel.live_registers.getPtr(src_ra);
+    assert(src_live_vi.* == .allocating);
+    src_live_vi.* = dst_vi;
+    return true;
+}
+
+/// Vacates `dst_ra` by re-homing its tracked value to memory: emits an
+/// `ldr`-family instruction connecting `dst_ra` with an address register and
+/// retargets the value's tracking to that address.
+/// NOTE(review): instruction selection here appears to proceed in reverse
+/// program order, which would make this the spill half of a spill/reload
+/// pair — confirm against the emit pipeline.
+/// Returns false if the register is mid-allocation, true once it is free.
+fn fillMemory(isel: *Select, dst_ra: Register.Alias) error{ OutOfMemory, CodegenFail }!bool {
+    const dst_live_vi = isel.live_registers.getPtr(dst_ra);
+    const dst_vi = switch (dst_live_vi.*) {
+        _ => |dst_vi| dst_vi,
+        .allocating => return false,
+        .free => return true,
+    };
+    const dst_vi_ra = &dst_vi.get(isel).location_payload.small.register;
+    assert(dst_vi_ra.* == dst_ra);
+    // Vector values need an integer register to hold the address; integer
+    // values can reuse `dst_ra` itself as the base.
+    const base_ra = if (dst_ra.isVector()) try isel.allocIntReg() else dst_ra;
+    defer if (base_ra != dst_ra) isel.freeReg(base_ra);
+    // Pick the load variant matching the value's size, signedness, and
+    // register file.
+    try isel.emit(switch (dst_vi.size(isel)) {
+        else => unreachable,
+        1 => if (dst_ra.isVector())
+            .ldr(dst_ra.b(), .{ .base = base_ra.x() })
+        else switch (dst_vi.signedness(isel)) {
+            .signed => .ldrsb(dst_ra.w(), .{ .base = base_ra.x() }),
+            .unsigned => .ldrb(dst_ra.w(), .{ .base = base_ra.x() }),
+        },
+        2 => if (dst_ra.isVector())
+            .ldr(dst_ra.h(), .{ .base = base_ra.x() })
+        else switch (dst_vi.signedness(isel)) {
+            .signed => .ldrsh(dst_ra.w(), .{ .base = base_ra.x() }),
+            .unsigned => .ldrh(dst_ra.w(), .{ .base = base_ra.x() }),
+        },
+        4 => .ldr(if (dst_ra.isVector()) dst_ra.s() else dst_ra.w(), .{ .base = base_ra.x() }),
+        8 => .ldr(if (dst_ra.isVector()) dst_ra.d() else dst_ra.x(), .{ .base = base_ra.x() }),
+        16 => .ldr(dst_ra.q(), .{ .base = base_ra.x() }),
+    });
+    // The value no longer lives in a register; it now lives at [base_ra].
+    dst_vi_ra.* = .zr;
+    try dst_vi.address(isel, 0, base_ra);
+    dst_live_vi.* = .free;
+    return true;
+}
+
+/// Merges possibly differing value tracking into a consistent state.
+///
+/// At a conditional branch, if a value is expected in the same register on both
+/// paths, or expected in a register on only one path, tracking is updated:
+///
+/// $0 -> r0 // final state is now consistent with both paths
+/// b.cond else
+/// then:
+/// $0 -> r0 // updated if not already consistent with else
+/// ...
+/// b end
+/// else:
+/// $0 -> r0
+/// ...
+/// end:
+///
+/// At a conditional branch, if a value is expected in different registers on
+/// each path, mov instructions are emitted:
+///
+/// $0 -> r0 // final state is now consistent with both paths
+/// b.cond else
+/// then:
+/// $0 -> r0 // updated to be consistent with else
+/// mov x1, x0 // emitted to merge the inconsistent states
+/// $0 -> r1
+/// ...
+/// b end
+/// else:
+/// $0 -> r0
+/// ...
+/// end:
+///
+/// At a loop, a value that is expected in a register at the repeats is updated:
+///
+/// $0 -> r0 // final state is now consistent with all paths
+/// loop:
+/// $0 -> r0 // updated to be consistent with the repeats
+/// ...
+/// $0 -> r0
+/// b.cond loop
+/// ...
+/// $0 -> r0
+/// b loop
+///
+/// At a loop, a value that is expected in a register at the top is filled:
+///
+/// $0 -> [sp, #A] // final state is now consistent with all paths
+/// loop:
+/// $0 -> [sp, #A] // updated to be consistent with the repeats
+/// ldr x0, [sp, #A] // emitted to merge the inconsistent states
+/// $0 -> r0
+/// ...
+/// $0 -> [sp, #A]
+/// b.cond loop
+/// ...
+/// $0 -> [sp, #A]
+/// b loop
+///
+/// At a loop, if a value is expected in different registers on each path,
+/// mov instructions are emitted:
+///
+/// $0 -> r0 // final state is now consistent with all paths
+/// loop:
+/// $0 -> r0 // updated to be consistent with the repeats
+/// mov x1, x0 // emitted to merge the inconsistent states
+/// $0 -> r1
+/// ...
+/// $0 -> r0
+/// b.cond loop
+/// ...
+/// $0 -> r0
+/// b loop
+fn merge(
+    isel: *Select,
+    expected_live_registers: *const LiveRegisters,
+    comptime opts: struct { fill_extra: bool = false },
+) !void {
+    // Pass 1: reserve every free register that the expected state cares
+    // about, so the shuffling below cannot hand it out.
+    var live_reg_it = isel.live_registers.iterator();
+    while (live_reg_it.next()) |live_reg_entry| {
+        const ra = live_reg_entry.key;
+        const actual_vi = live_reg_entry.value;
+        const expected_vi = expected_live_registers.get(ra);
+        switch (expected_vi) {
+            else => switch (actual_vi.*) {
+                _ => {},
+                .allocating => unreachable,
+                .free => actual_vi.* = .allocating,
+            },
+            .free => {},
+        }
+    }
+    // Pass 2: make each register's contents match the expected state,
+    // vacating mismatched values and re-homing the expected ones.
+    live_reg_it = isel.live_registers.iterator();
+    while (live_reg_it.next()) |live_reg_entry| {
+        const ra = live_reg_entry.key;
+        const actual_vi = live_reg_entry.value;
+        const expected_vi = expected_live_registers.get(ra);
+        switch (expected_vi) {
+            _ => {
+                switch (actual_vi.*) {
+                    _ => _ = if (opts.fill_extra) {
+                        assert(try isel.fillMemory(ra));
+                        assert(actual_vi.* == .free);
+                    },
+                    .allocating => actual_vi.* = .free,
+                    .free => unreachable,
+                }
+                try expected_vi.liveIn(isel, ra, expected_live_registers);
+            },
+            .allocating => if (if (opts.fill_extra) try isel.fillMemory(ra) else try isel.fill(ra)) {
+                assert(actual_vi.* == .free);
+                actual_vi.* = .allocating;
+            },
+            .free => if (opts.fill_extra) assert(try isel.fillMemory(ra) and actual_vi.* == .free),
+        }
+    }
+    // Pass 3: commit the expected assignments and sanity-check the result.
+    live_reg_it = isel.live_registers.iterator();
+    while (live_reg_it.next()) |live_reg_entry| {
+        const ra = live_reg_entry.key;
+        const actual_vi = live_reg_entry.value;
+        const expected_vi = expected_live_registers.get(ra);
+        switch (expected_vi) {
+            _ => {
+                assert(actual_vi.* == .allocating and expected_vi.register(isel) == ra);
+                actual_vi.* = expected_vi;
+            },
+            .allocating => assert(actual_vi.* == .allocating),
+            .free => if (opts.fill_extra) assert(actual_vi.* == .free),
+        }
+    }
+}
+
+const call = struct {
+    /// Marker for registers that carry arguments/results across a call.
+    const param_reg: Value.Index = @enumFromInt(@intFromEnum(Value.Index.allocating) - 2);
+    /// Marker for registers a callee may clobber without preserving.
+    const callee_clobbered_reg: Value.Index = @enumFromInt(@intFromEnum(Value.Index.allocating) - 1);
+    /// Register state at a call boundary. Per this table: r0-r8 and v0-v7
+    /// carry parameters/results; r9-r18, lr (r30), v16-v31, and p0-p15 are
+    /// callee-clobbered; r19-r29, sp, v8-v15, fpcr/fpsr, and ffr are treated
+    /// as preserved.
+    /// NOTE(review): AAPCS64 only guarantees the low 64 bits of v8-v15 are
+    /// preserved; 128-bit values appear to be handled separately (see the
+    /// size-16 spill in `finishReturn`) — confirm.
+    const caller_saved_regs: LiveRegisters = .init(.{
+        .r0 = param_reg,
+        .r1 = param_reg,
+        .r2 = param_reg,
+        .r3 = param_reg,
+        .r4 = param_reg,
+        .r5 = param_reg,
+        .r6 = param_reg,
+        .r7 = param_reg,
+        .r8 = param_reg,
+        .r9 = callee_clobbered_reg,
+        .r10 = callee_clobbered_reg,
+        .r11 = callee_clobbered_reg,
+        .r12 = callee_clobbered_reg,
+        .r13 = callee_clobbered_reg,
+        .r14 = callee_clobbered_reg,
+        .r15 = callee_clobbered_reg,
+        .r16 = callee_clobbered_reg,
+        .r17 = callee_clobbered_reg,
+        .r18 = callee_clobbered_reg,
+        .r19 = .free,
+        .r20 = .free,
+        .r21 = .free,
+        .r22 = .free,
+        .r23 = .free,
+        .r24 = .free,
+        .r25 = .free,
+        .r26 = .free,
+        .r27 = .free,
+        .r28 = .free,
+        .r29 = .free,
+        .r30 = callee_clobbered_reg,
+        .zr = .free,
+        .sp = .free,
+
+        .pc = .free,
+
+        .v0 = param_reg,
+        .v1 = param_reg,
+        .v2 = param_reg,
+        .v3 = param_reg,
+        .v4 = param_reg,
+        .v5 = param_reg,
+        .v6 = param_reg,
+        .v7 = param_reg,
+        .v8 = .free,
+        .v9 = .free,
+        .v10 = .free,
+        .v11 = .free,
+        .v12 = .free,
+        .v13 = .free,
+        .v14 = .free,
+        .v15 = .free,
+        .v16 = callee_clobbered_reg,
+        .v17 = callee_clobbered_reg,
+        .v18 = callee_clobbered_reg,
+        .v19 = callee_clobbered_reg,
+        .v20 = callee_clobbered_reg,
+        .v21 = callee_clobbered_reg,
+        .v22 = callee_clobbered_reg,
+        .v23 = callee_clobbered_reg,
+        .v24 = callee_clobbered_reg,
+        .v25 = callee_clobbered_reg,
+        .v26 = callee_clobbered_reg,
+        .v27 = callee_clobbered_reg,
+        .v28 = callee_clobbered_reg,
+        .v29 = callee_clobbered_reg,
+        .v30 = callee_clobbered_reg,
+        .v31 = callee_clobbered_reg,
+
+        .fpcr = .free,
+        .fpsr = .free,
+
+        .p0 = callee_clobbered_reg,
+        .p1 = callee_clobbered_reg,
+        .p2 = callee_clobbered_reg,
+        .p3 = callee_clobbered_reg,
+        .p4 = callee_clobbered_reg,
+        .p5 = callee_clobbered_reg,
+        .p6 = callee_clobbered_reg,
+        .p7 = callee_clobbered_reg,
+        .p8 = callee_clobbered_reg,
+        .p9 = callee_clobbered_reg,
+        .p10 = callee_clobbered_reg,
+        .p11 = callee_clobbered_reg,
+        .p12 = callee_clobbered_reg,
+        .p13 = callee_clobbered_reg,
+        .p14 = callee_clobbered_reg,
+        .p15 = callee_clobbered_reg,
+
+        .ffr = .free,
+    });
+ fn prepareReturn(isel: *Select) !void {
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (caller_saved_regs.get(live_reg_entry.key)) {
+ else => unreachable,
+ param_reg, callee_clobbered_reg => switch (live_reg_entry.value.*) {
+ _ => {},
+ .allocating => unreachable,
+ .free => live_reg_entry.value.* = .allocating,
+ },
+ .free => {},
+ };
+ }
+ fn returnFill(isel: *Select, ra: Register.Alias) !void {
+ const live_vi = isel.live_registers.getPtr(ra);
+ if (try isel.fill(ra)) {
+ assert(live_vi.* == .free);
+ live_vi.* = .allocating;
+ }
+ assert(live_vi.* == .allocating);
+ }
+ fn returnLiveIn(isel: *Select, vi: Value.Index, ra: Register.Alias) !void {
+ try vi.defLiveIn(isel, ra, &caller_saved_regs);
+ }
+ fn finishReturn(isel: *Select) !void {
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| {
+ switch (live_reg_entry.value.*) {
+ _ => |live_vi| switch (live_vi.size(isel)) {
+ else => unreachable,
+ 1, 2, 4, 8 => {},
+ 16 => {
+ assert(try isel.fillMemory(live_reg_entry.key));
+ assert(live_reg_entry.value.* == .free);
+ switch (caller_saved_regs.get(live_reg_entry.key)) {
+ else => unreachable,
+ param_reg, callee_clobbered_reg => live_reg_entry.value.* = .allocating,
+ .free => {},
+ }
+ continue;
+ },
+ },
+ .allocating, .free => {},
+ }
+ switch (caller_saved_regs.get(live_reg_entry.key)) {
+ else => unreachable,
+ param_reg, callee_clobbered_reg => switch (live_reg_entry.value.*) {
+ _ => {
+ assert(try isel.fill(live_reg_entry.key));
+ assert(live_reg_entry.value.* == .free);
+ live_reg_entry.value.* = .allocating;
+ },
+ .allocating => {},
+ .free => unreachable,
+ },
+ .free => {},
+ }
+ }
+ }
+ fn prepareCallee(isel: *Select) !void {
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (caller_saved_regs.get(live_reg_entry.key)) {
+ else => unreachable,
+ param_reg => assert(live_reg_entry.value.* == .allocating),
+ callee_clobbered_reg => isel.freeReg(live_reg_entry.key),
+ .free => {},
+ };
+ }
+ fn finishCallee(_: *Select) !void {}
+ fn prepareParams(_: *Select) !void {}
+ fn paramLiveOut(isel: *Select, vi: Value.Index, ra: Register.Alias) !void {
+ isel.freeReg(ra);
+ try vi.liveOut(isel, ra);
+ const live_vi = isel.live_registers.getPtr(ra);
+ if (live_vi.* == .free) live_vi.* = .allocating;
+ }
+ fn paramAddress(isel: *Select, vi: Value.Index, ra: Register.Alias) !void {
+ isel.freeReg(ra);
+ try vi.address(isel, 0, ra);
+ const live_vi = isel.live_registers.getPtr(ra);
+ if (live_vi.* == .free) live_vi.* = .allocating;
+ }
+ fn finishParams(isel: *Select) !void {
+ var live_reg_it = isel.live_registers.iterator();
+ while (live_reg_it.next()) |live_reg_entry| switch (caller_saved_regs.get(live_reg_entry.key)) {
+ else => unreachable,
+ param_reg => switch (live_reg_entry.value.*) {
+ _ => {},
+ .allocating => live_reg_entry.value.* = .free,
+ .free => unreachable,
+ },
+ callee_clobbered_reg, .free => {},
+ };
+ }
+};
+
+pub const CallAbiIterator = struct {
+ /// Next General-purpose Register Number
+ ngrn: Register.Alias,
+ /// Next SIMD and Floating-point Register Number
+ nsrn: Register.Alias,
+ /// next stacked argument address
+ nsaa: u24,
+
+ pub const ngrn_start: Register.Alias = .r0;
+ pub const ngrn_end: Register.Alias = .r8;
+ pub const nsrn_start: Register.Alias = .v0;
+ pub const nsrn_end: Register.Alias = .v8;
+ pub const nsaa_start: u42 = 0;
+
+ pub const init: CallAbiIterator = .{
+ // A.1
+ .ngrn = ngrn_start,
+ // A.2
+ .nsrn = nsrn_start,
+ // A.3
+ .nsaa = nsaa_start,
+ };
+
+ pub fn param(it: *CallAbiIterator, isel: *Select, ty: ZigType) !?Value.Index {
+ const zcu = isel.pt.zcu;
+ const ip = &zcu.intern_pool;
+
+ if (ty.isNoReturn(zcu) or !ty.hasRuntimeBitsIgnoreComptime(zcu)) return null;
+ try isel.values.ensureUnusedCapacity(zcu.gpa, Value.max_parts);
+ const wip_vi = isel.initValue(ty);
+ type_key: switch (ip.indexToKey(ty.toIntern())) {
+ else => return isel.fail("CallAbiIterator.param({f})", .{isel.fmtType(ty)}),
+ .int_type => |int_type| switch (int_type.bits) {
+ 0 => unreachable,
+ 1...16 => {
+ wip_vi.setSignedness(isel, int_type.signedness);
+ // C.7
+ it.integer(isel, wip_vi);
+ },
+ // C.7
+ 17...64 => it.integer(isel, wip_vi),
+ // C.9
+ 65...128 => it.integers(isel, wip_vi, @splat(@divExact(wip_vi.size(isel), 2))),
+ else => it.indirect(isel, wip_vi),
+ },
+ .array_type => switch (wip_vi.size(isel)) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => |size| it.integers(isel, wip_vi, .{ 8, size - 8 }),
+ else => it.indirect(isel, wip_vi),
+ },
+ .ptr_type => |ptr_type| switch (ptr_type.flags.size) {
+ .one, .many, .c => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = 64,
+ } },
+ .slice => it.integers(isel, wip_vi, @splat(8)),
+ },
+ .opt_type => |child_type| if (ty.optionalReprIsPayload(zcu))
+ continue :type_key ip.indexToKey(child_type)
+ else switch (ZigType.fromInterned(child_type).abiSize(zcu)) {
+ 0 => continue :type_key .{ .simple_type = .bool },
+ 1...7 => it.integer(isel, wip_vi),
+ 8...15 => |child_size| it.integers(isel, wip_vi, .{ 8, child_size - 7 }),
+ else => return isel.fail("CallAbiIterator.param({f})", .{isel.fmtType(ty)}),
+ },
+ .anyframe_type => unreachable,
+ .error_union_type => |error_union_type| switch (wip_vi.size(isel)) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ var sizes: [2]u64 = @splat(0);
+ const payload_ty: ZigType = .fromInterned(error_union_type.payload_type);
+ {
+ const error_set_ty: ZigType = .fromInterned(error_union_type.error_set_type);
+ const offset = codegen.errUnionErrorOffset(payload_ty, zcu);
+ const size = error_set_ty.abiSize(zcu);
+ const end = offset % 8 + size;
+ const part_index: usize = @intCast(offset / 8);
+ sizes[part_index] = @max(sizes[part_index], @min(end, 8));
+ if (end > 8) sizes[part_index + 1] = @max(sizes[part_index + 1], end - 8);
+ }
+ {
+ const offset = codegen.errUnionPayloadOffset(payload_ty, zcu);
+ const size = payload_ty.abiSize(zcu);
+ const end = offset % 8 + size;
+ const part_index: usize = @intCast(offset / 8);
+ sizes[part_index] = @max(sizes[part_index], @min(end, 8));
+ if (end > 8) sizes[part_index + 1] = @max(sizes[part_index + 1], end - 8);
+ }
+ it.integers(isel, wip_vi, sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ },
+ .simple_type => |simple_type| switch (simple_type) {
+ .f16, .f32, .f64, .f128, .c_longdouble => it.vector(isel, wip_vi),
+ .f80 => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 80 } },
+ .usize,
+ .isize,
+ .c_char,
+ .c_short,
+ .c_ushort,
+ .c_int,
+ .c_uint,
+ .c_long,
+ .c_ulong,
+ .c_longlong,
+ .c_ulonglong,
+ => continue :type_key .{ .int_type = ty.intInfo(zcu) },
+ // B.1
+ .anyopaque => it.indirect(isel, wip_vi),
+ .bool => continue :type_key .{ .int_type = .{ .signedness = .unsigned, .bits = 1 } },
+ .anyerror => continue :type_key .{ .int_type = .{
+ .signedness = .unsigned,
+ .bits = zcu.errorSetBits(),
+ } },
+ .void,
+ .type,
+ .comptime_int,
+ .comptime_float,
+ .noreturn,
+ .null,
+ .undefined,
+ .enum_literal,
+ .adhoc_inferred_error_set,
+ .generic_poison,
+ => unreachable,
+ },
+ .struct_type => {
+ const size = wip_vi.size(isel);
+ const loaded_struct = ip.loadStructType(ty.toIntern());
+ if (size <= 16 * 4) homogeneous_aggregate: {
+ const fdt = homogeneousStructBaseType(zcu, &loaded_struct) orelse break :homogeneous_aggregate;
+ const parts_len = @shrExact(size, fdt.log2Size());
+ if (parts_len > 4) break :homogeneous_aggregate;
+ it.vectors(isel, wip_vi, fdt, @intCast(parts_len));
+ break :type_key;
+ }
+ switch (size) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ var part_offset: u64 = 0;
+ var part_sizes: [2]u64 = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var next_field_end: u64 = 0;
+ var field_it = loaded_struct.iterateRuntimeOrder(ip);
+ while (part_offset < size) {
+ const field_end = next_field_end;
+ const next_field_begin = if (field_it.next()) |field_index| next_field_begin: {
+ const field_ty: ZigType = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
+ const next_field_begin = switch (loaded_struct.fieldAlign(ip, field_index)) {
+ .none => field_ty.abiAlignment(zcu),
+ else => |field_align| field_align,
+ }.forward(field_end);
+ next_field_end = next_field_begin + field_ty.abiSize(zcu);
+ break :next_field_begin next_field_begin;
+ } else std.mem.alignForward(u64, size, 8);
+ while (next_field_begin - part_offset >= 8) {
+ const part_size = field_end - part_offset;
+ part_sizes[parts_len] = part_size;
+ assert(part_offset + part_size <= size);
+ parts_len += 1;
+ part_offset = next_field_begin;
+ }
+ }
+ assert(parts_len == part_sizes.len);
+ it.integers(isel, wip_vi, part_sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ }
+ },
+ .tuple_type => |tuple_type| {
+ const size = wip_vi.size(isel);
+ if (size <= 16 * 4) homogeneous_aggregate: {
+ const fdt = homogeneousTupleBaseType(zcu, tuple_type) orelse break :homogeneous_aggregate;
+ const parts_len = @shrExact(size, fdt.log2Size());
+ if (parts_len > 4) break :homogeneous_aggregate;
+ it.vectors(isel, wip_vi, fdt, @intCast(parts_len));
+ break :type_key;
+ }
+ switch (size) {
+ 0 => unreachable,
+ 1...8 => it.integer(isel, wip_vi),
+ 9...16 => {
+ var part_offset: u64 = 0;
+ var part_sizes: [2]u64 = undefined;
+ var parts_len: Value.PartsLen = 0;
+ var next_field_end: u64 = 0;
+ var field_index: usize = 0;
+ while (part_offset < size) {
+ const field_end = next_field_end;
+ const next_field_begin = while (field_index < tuple_type.types.len) {
+ defer field_index += 1;
+ if (tuple_type.values.get(ip)[field_index] != .none) continue;
+ const field_ty: ZigType = .fromInterned(tuple_type.types.get(ip)[field_index]);
+ const next_field_begin = field_ty.abiAlignment(zcu).forward(field_end);
+ next_field_end = next_field_begin + field_ty.abiSize(zcu);
+ break next_field_begin;
+ } else std.mem.alignForward(u64, size, 8);
+ while (next_field_begin - part_offset >= 8) {
+ const part_size = @min(field_end - part_offset, 8);
+ part_sizes[parts_len] = part_size;
+ assert(part_offset + part_size <= size);
+ parts_len += 1;
+ part_offset += part_size;
+ if (part_offset >= field_end) part_offset = next_field_begin;
+ }
+ }
+ assert(parts_len == part_sizes.len);
+ it.integers(isel, wip_vi, part_sizes);
+ },
+ else => it.indirect(isel, wip_vi),
+ }
+ },
+ .opaque_type, .func_type => continue :type_key .{ .simple_type = .anyopaque },
+ .enum_type => continue :type_key ip.indexToKey(ip.loadEnumType(ty.toIntern()).tag_ty),
+ .error_set_type,
+ .inferred_error_set_type,
+ => continue :type_key .{ .simple_type = .anyerror },
+ .undef,
+ .simple_value,
+ .variable,
+ .@"extern",
+ .func,
+ .int,
+ .err,
+ .error_union,
+ .enum_literal,
+ .enum_tag,
+ .empty_enum_value,
+ .float,
+ .ptr,
+ .slice,
+ .opt,
+ .aggregate,
+ .un,
+ .memoized_call,
+ => unreachable, // values, not types
+ }
+ return wip_vi.ref(isel);
+ }
+
+ pub fn ret(it: *CallAbiIterator, isel: *Select, ty: ZigType) !?Value.Index {
+ const wip_vi = try it.param(isel, ty) orelse return null;
+ switch (wip_vi.parent(isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => |address_vi| {
+ assert(address_vi.hint(isel) == ngrn_start);
+ address_vi.setHint(isel, ngrn_end);
+ },
+ }
+ return wip_vi;
+ }
+
+ pub const FundamentalDataType = enum {
+ half,
+ single,
+ double,
+ quad,
+ vector64,
+ vector128,
+ fn log2Size(fdt: FundamentalDataType) u3 {
+ return switch (fdt) {
+ .half => 1,
+ .single => 2,
+ .double, .vector64 => 3,
+ .quad, .vector128 => 4,
+ };
+ }
+ fn size(fdt: FundamentalDataType) u64 {
+ return @as(u64, 1) << fdt.log2Size();
+ }
+ };
+ fn homogeneousAggregateBaseType(zcu: *Zcu, initial_ty: InternPool.Index) ?FundamentalDataType {
+ const ip = &zcu.intern_pool;
+ var ty = initial_ty;
+ return type_key: switch (ip.indexToKey(ty)) {
+ else => null,
+ .array_type => |array_type| {
+ ty = array_type.child;
+ continue :type_key ip.indexToKey(ty);
+ },
+ .vector_type => switch (ZigType.fromInterned(ty).abiSize(zcu)) {
+ else => null,
+ 8 => .vector64,
+ 16 => .vector128,
+ },
+ .simple_type => |simple_type| switch (simple_type) {
+ .f16 => .half,
+ .f32 => .single,
+ .f64 => .double,
+ .f128 => .quad,
+ .c_longdouble => switch (zcu.getTarget().cTypeBitSize(.longdouble)) {
+ else => unreachable,
+ 16 => .half,
+ 32 => .single,
+ 64 => .double,
+ 80 => null,
+ 128 => .quad,
+ },
+ else => null,
+ },
+ .struct_type => homogeneousStructBaseType(zcu, &ip.loadStructType(ty)),
+ .tuple_type => |tuple_type| homogeneousTupleBaseType(zcu, tuple_type),
+ };
+ }
+ fn homogeneousStructBaseType(zcu: *Zcu, loaded_struct: *const InternPool.LoadedStructType) ?FundamentalDataType {
+ const ip = &zcu.intern_pool;
+ var common_fdt: ?FundamentalDataType = null;
+ for (0.., loaded_struct.field_types.get(ip)) |field_index, field_ty| {
+ if (loaded_struct.fieldIsComptime(ip, field_index)) continue;
+ if (loaded_struct.fieldAlign(ip, field_index) != .none) return null;
+ if (!ZigType.fromInterned(field_ty).hasRuntimeBits(zcu)) continue;
+ const fdt = homogeneousAggregateBaseType(zcu, field_ty);
+ if (common_fdt == null) common_fdt = fdt else if (fdt != common_fdt) return null;
+ }
+ return common_fdt;
+ }
+ fn homogeneousTupleBaseType(zcu: *Zcu, tuple_type: InternPool.Key.TupleType) ?FundamentalDataType {
+ const ip = &zcu.intern_pool;
+ var common_fdt: ?FundamentalDataType = null;
+ for (tuple_type.values.get(ip), tuple_type.types.get(ip)) |field_val, field_ty| {
+ if (field_val != .none) continue;
+ const fdt = homogeneousAggregateBaseType(zcu, field_ty);
+ if (common_fdt == null) common_fdt = fdt else if (fdt != common_fdt) return null;
+ }
+ return common_fdt;
+ }
+
+ const Spec = struct {
+ offset: u64,
+ size: u64,
+ };
+
+ fn stack(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ // C.12
+ it.nsaa = @intCast(wip_vi.alignment(isel).forward(it.nsaa));
+ const parent_vi = switch (wip_vi.parent(isel)) {
+ .unallocated, .stack_slot => wip_vi,
+ .address, .constant => unreachable,
+ .value => |parent_vi| parent_vi,
+ };
+ switch (parent_vi.parent(isel)) {
+ .unallocated => parent_vi.setParent(isel, .{ .stack_slot = .{
+ .base = .sp,
+ .offset = it.nsaa,
+ } }),
+ .stack_slot => {},
+ .address, .value, .constant => unreachable,
+ }
+ it.nsaa += @intCast(wip_vi.size(isel));
+ }
+
+ fn integer(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ assert(wip_vi.size(isel) <= 8);
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ if (it.ngrn == ngrn_end) return it.stack(isel, wip_vi);
+ wip_vi.setHint(isel, it.ngrn);
+ it.ngrn = @enumFromInt(@intFromEnum(it.ngrn) + 1);
+ }
+
+ fn integers(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index, part_sizes: [2]u64) void {
+ assert(wip_vi.size(isel) <= 16);
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ // C.8
+ if (natural_alignment == .@"16") it.ngrn = @enumFromInt(std.mem.alignForward(
+ @typeInfo(Register.Alias).@"enum".tag_type,
+ @intFromEnum(it.ngrn),
+ 2,
+ ));
+ if (it.ngrn == ngrn_end) return it.stack(isel, wip_vi);
+ wip_vi.setParts(isel, part_sizes.len);
+ for (0.., part_sizes) |part_index, part_size|
+ it.integer(isel, wip_vi.addPart(isel, 8 * part_index, part_size));
+ }
+
+ fn vector(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ assert(wip_vi.size(isel) <= 16);
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ wip_vi.setIsVector(isel);
+ if (it.nsrn == nsrn_end) return it.stack(isel, wip_vi);
+ wip_vi.setHint(isel, it.nsrn);
+ it.nsrn = @enumFromInt(@intFromEnum(it.nsrn) + 1);
+ }
+
+ fn vectors(
+ it: *CallAbiIterator,
+ isel: *Select,
+ wip_vi: Value.Index,
+ fdt: FundamentalDataType,
+ parts_len: Value.PartsLen,
+ ) void {
+ const fdt_log2_size = fdt.log2Size();
+ assert(wip_vi.size(isel) == @shlExact(@as(u9, parts_len), fdt_log2_size));
+ const natural_alignment = wip_vi.alignment(isel);
+ assert(natural_alignment.order(.@"16").compare(.lte));
+ wip_vi.setAlignment(isel, natural_alignment.maxStrict(.@"8"));
+ if (@intFromEnum(it.nsrn) > @intFromEnum(nsrn_end) - parts_len) return it.stack(isel, wip_vi);
+ if (parts_len == 1) return it.vector(isel, wip_vi);
+ wip_vi.setParts(isel, parts_len);
+ const fdt_size = @as(u64, 1) << fdt_log2_size;
+ for (0..parts_len) |part_index|
+ it.vector(isel, wip_vi.addPart(isel, part_index << fdt_log2_size, fdt_size));
+ }
+
+ fn indirect(it: *CallAbiIterator, isel: *Select, wip_vi: Value.Index) void {
+ const wip_address_vi = isel.initValue(.usize);
+ wip_vi.setParent(isel, .{ .address = wip_address_vi });
+ it.integer(isel, wip_address_vi);
+ }
+};
+
+const Air = @import("../../Air.zig");
+const assert = std.debug.assert;
+const codegen = @import("../../codegen.zig");
+const Constant = @import("../../Value.zig");
+const InternPool = @import("../../InternPool.zig");
+const Package = @import("../../Package.zig");
+const Register = codegen.aarch64.encoding.Register;
+const Select = @This();
+const std = @import("std");
+const tracking_log = std.log.scoped(.tracking);
+const wip_mir_log = std.log.scoped(.@"wip-mir");
+const Zcu = @import("../../Zcu.zig");
+const ZigType = @import("../../Type.zig");
src/codegen/aarch64.zig
@@ -0,0 +1,194 @@
+pub const abi = @import("aarch64/abi.zig");
+pub const Assemble = @import("aarch64/Assemble.zig");
+pub const Disassemble = @import("aarch64/Disassemble.zig");
+pub const encoding = @import("aarch64/encoding.zig");
+pub const Mir = @import("aarch64/Mir.zig");
+pub const Select = @import("aarch64/Select.zig");
+
+pub fn legalizeFeatures(_: *const std.Target) ?*Air.Legalize.Features {
+ return null;
+}
+
+pub fn generate(
+ _: *link.File,
+ pt: Zcu.PerThread,
+ _: Zcu.LazySrcLoc,
+ func_index: InternPool.Index,
+ air: *const Air,
+ liveness: *const ?Air.Liveness,
+) !Mir {
+ const zcu = pt.zcu;
+ const gpa = zcu.gpa;
+ const func = zcu.funcInfo(func_index);
+ const func_type = zcu.intern_pool.indexToKey(func.ty).func_type;
+ assert(liveness.* == null);
+
+ const mod = zcu.navFileScope(func.owner_nav).mod.?;
+ var isel: Select = .{
+ .pt = pt,
+ .target = &mod.resolved_target.result,
+ .air = air.*,
+ .nav_index = zcu.funcInfo(func_index).owner_nav,
+
+ .def_order = .empty,
+ .blocks = .empty,
+ .loops = .empty,
+ .active_loops = .empty,
+ .loop_live = .{
+ .set = .empty,
+ .list = .empty,
+ },
+ .dom_start = 0,
+ .dom_len = 0,
+ .dom = .empty,
+
+ .saved_registers = comptime .initEmpty(),
+ .instructions = .empty,
+ .literals = .empty,
+ .nav_relocs = .empty,
+ .uav_relocs = .empty,
+ .global_relocs = .empty,
+ .literal_relocs = .empty,
+
+ .returns = false,
+ .va_list = undefined,
+ .stack_size = 0,
+ .stack_align = .@"16",
+
+ .live_registers = comptime .initFill(.free),
+ .live_values = .empty,
+ .values = .empty,
+ };
+ defer isel.deinit();
+
+ const air_main_body = air.getMainBody();
+ var param_it: Select.CallAbiIterator = .init;
+ const air_args = for (air_main_body, 0..) |air_inst_index, body_index| {
+ if (air.instructions.items(.tag)[@intFromEnum(air_inst_index)] != .arg) break air_main_body[0..body_index];
+ const param_ty = air.instructions.items(.data)[@intFromEnum(air_inst_index)].arg.ty.toType();
+ const param_vi = try param_it.param(&isel, param_ty);
+ tracking_log.debug("${d} <- %{d}", .{ @intFromEnum(param_vi.?), @intFromEnum(air_inst_index) });
+ try isel.live_values.putNoClobber(gpa, air_inst_index, param_vi.?);
+ } else unreachable;
+
+ const saved_gra_start = if (mod.strip) param_it.ngrn else Select.CallAbiIterator.ngrn_start;
+ const saved_gra_end = if (func_type.is_var_args) Select.CallAbiIterator.ngrn_end else param_it.ngrn;
+ const saved_gra_len = @intFromEnum(saved_gra_end) - @intFromEnum(saved_gra_start);
+
+ const saved_vra_start = if (mod.strip) param_it.nsrn else Select.CallAbiIterator.nsrn_start;
+ const saved_vra_end = if (func_type.is_var_args) Select.CallAbiIterator.nsrn_end else param_it.nsrn;
+ const saved_vra_len = @intFromEnum(saved_vra_end) - @intFromEnum(saved_vra_start);
+
+ const frame_record = 2;
+ const named_stack_args: Select.Value.Indirect = .{
+ .base = .fp,
+ .offset = 8 * std.mem.alignForward(u7, frame_record + saved_gra_len, 2),
+ };
+ isel.va_list = .{
+ .__stack = named_stack_args.withOffset(param_it.nsaa),
+ .__gr_top = named_stack_args,
+ .__vr_top = .{ .base = .fp, .offset = 0 },
+ };
+
+ // translate arg locations from caller-based to callee-based
+ for (air_args) |air_inst_index| {
+ assert(air.instructions.items(.tag)[@intFromEnum(air_inst_index)] == .arg);
+ const arg_vi = isel.live_values.get(air_inst_index).?;
+ const passed_vi = switch (arg_vi.parent(&isel)) {
+ .unallocated, .stack_slot => arg_vi,
+ .value, .constant => unreachable,
+ .address => |address_vi| address_vi,
+ };
+ switch (passed_vi.parent(&isel)) {
+ .unallocated => if (!mod.strip) {
+ var part_it = arg_vi.parts(&isel);
+ const first_passed_part_vi = part_it.next() orelse passed_vi;
+ const hint_ra = first_passed_part_vi.hint(&isel).?;
+ passed_vi.setParent(&isel, .{ .stack_slot = if (hint_ra.isVector())
+ isel.va_list.__vr_top.withOffset(@as(i8, -16) *
+ (@intFromEnum(saved_vra_end) - @intFromEnum(hint_ra)))
+ else
+ isel.va_list.__gr_top.withOffset(@as(i8, -8) *
+ (@intFromEnum(saved_gra_end) - @intFromEnum(hint_ra))) });
+ },
+ .stack_slot => |stack_slot| {
+ assert(stack_slot.base == .sp);
+ passed_vi.setParent(&isel, .{
+ .stack_slot = named_stack_args.withOffset(stack_slot.offset),
+ });
+ },
+ .address, .value, .constant => unreachable,
+ }
+ }
+
+ ret: {
+ var ret_it: Select.CallAbiIterator = .init;
+ const ret_vi = try ret_it.ret(&isel, .fromInterned(func_type.return_type)) orelse break :ret;
+ tracking_log.debug("${d} <- %main", .{@intFromEnum(ret_vi)});
+ try isel.live_values.putNoClobber(gpa, Select.Block.main, ret_vi);
+ }
+
+ assert(!(try isel.blocks.getOrPut(gpa, Select.Block.main)).found_existing);
+ try isel.analyze(air_main_body);
+ try isel.finishAnalysis();
+ isel.verify(false);
+
+ isel.blocks.values()[0] = .{
+ .live_registers = isel.live_registers,
+ .target_label = @intCast(isel.instructions.items.len),
+ };
+ try isel.body(air_main_body);
+ if (isel.live_values.fetchRemove(Select.Block.main)) |ret_vi| {
+ switch (ret_vi.value.parent(&isel)) {
+ .unallocated, .stack_slot => {},
+ .value, .constant => unreachable,
+ .address => |address_vi| try address_vi.liveIn(
+ &isel,
+ address_vi.hint(&isel).?,
+ comptime &.initFill(.free),
+ ),
+ }
+ ret_vi.value.deref(&isel);
+ }
+ isel.verify(true);
+
+ const prologue = isel.instructions.items.len;
+ const epilogue = try isel.layout(
+ param_it,
+ func_type.is_var_args,
+ saved_gra_len,
+ saved_vra_len,
+ mod,
+ );
+
+ const instructions = try isel.instructions.toOwnedSlice(gpa);
+ var mir: Mir = .{
+ .prologue = instructions[prologue..epilogue],
+ .body = instructions[0..prologue],
+ .epilogue = instructions[epilogue..],
+ .literals = &.{},
+ .nav_relocs = &.{},
+ .uav_relocs = &.{},
+ .global_relocs = &.{},
+ .literal_relocs = &.{},
+ };
+ errdefer mir.deinit(gpa);
+ mir.literals = try isel.literals.toOwnedSlice(gpa);
+ mir.nav_relocs = try isel.nav_relocs.toOwnedSlice(gpa);
+ mir.uav_relocs = try isel.uav_relocs.toOwnedSlice(gpa);
+ mir.global_relocs = try isel.global_relocs.toOwnedSlice(gpa);
+ mir.literal_relocs = try isel.literal_relocs.toOwnedSlice(gpa);
+ return mir;
+}
+
+test {
+ _ = Assemble;
+}
+
+const Air = @import("../Air.zig");
+const assert = std.debug.assert;
+const InternPool = @import("../InternPool.zig");
+const link = @import("../link.zig");
+const std = @import("std");
+const tracking_log = std.log.scoped(.tracking);
+const Zcu = @import("../Zcu.zig");
src/codegen/c.zig
@@ -449,14 +449,15 @@ pub const Function = struct {
if (gop.found_existing) return gop.value_ptr.*;
const pt = f.object.dg.pt;
+ const zcu = pt.zcu;
const val = (try f.air.value(ref, pt)).?;
const ty = f.typeOf(ref);
- const result: CValue = if (lowersToArray(ty, pt)) result: {
+ const result: CValue = if (lowersToArray(ty, zcu)) result: {
const ch = &f.object.code_header.writer;
const decl_c_value = try f.allocLocalValue(.{
.ctype = try f.ctypeFromType(ty, .complete),
- .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(pt.zcu)),
+ .alignas = CType.AlignAs.fromAbiAlignment(ty.abiAlignment(zcu)),
});
const gpa = f.object.dg.gpa;
try f.allocs.put(gpa, decl_c_value.new_local, false);
@@ -916,7 +917,7 @@ pub const DeclGen = struct {
// Ensure complete type definition is available before accessing fields.
_ = try dg.ctypeFromType(parent_ptr_ty.childType(zcu), .complete);
- switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, pt)) {
+ switch (fieldLocation(parent_ptr_ty, field.result_ptr_ty, field.field_idx, zcu)) {
.begin => {
const ptr_ctype = try dg.ctypeFromType(field.result_ptr_ty, .complete);
try w.writeByte('(');
@@ -3008,7 +3009,7 @@ pub fn generate(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) @import("../codegen.zig").CodeGenError!Mir {
const zcu = pt.zcu;
const gpa = zcu.gpa;
@@ -3021,7 +3022,7 @@ pub fn generate(
var function: Function = .{
.value_map = .init(gpa),
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.func_index = func_index,
.object = .{
.dg = .{
@@ -3961,7 +3962,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
- const is_array = lowersToArray(src_ty, pt);
+ const is_array = lowersToArray(src_ty, zcu);
const need_memcpy = !is_aligned or is_array;
const w = &f.object.code.writer;
@@ -4044,7 +4045,7 @@ fn airRet(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !void {
const operand = try f.resolveInst(un_op);
try reap(f, inst, &.{un_op});
var deref = is_ptr;
- const is_array = lowersToArray(ret_ty, pt);
+ const is_array = lowersToArray(ret_ty, zcu);
const ret_val = if (is_array) ret_val: {
const array_local = try f.allocAlignedLocal(inst, .{
.ctype = ret_ctype,
@@ -4228,7 +4229,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
ptr_info.flags.alignment.order(src_ty.abiAlignment(zcu)).compare(.gte)
else
true;
- const is_array = lowersToArray(.fromInterned(ptr_info.child), pt);
+ const is_array = lowersToArray(.fromInterned(ptr_info.child), zcu);
const need_memcpy = !is_aligned or is_array;
const src_val = try f.resolveInst(bin_op.rhs);
@@ -4873,7 +4874,7 @@ fn airCall(
}
const result = result: {
- if (result_local == .none or !lowersToArray(ret_ty, pt))
+ if (result_local == .none or !lowersToArray(ret_ty, zcu))
break :result result_local;
const array_local = try f.allocLocal(inst, ret_ty);
@@ -5971,13 +5972,12 @@ fn fieldLocation(
container_ptr_ty: Type,
field_ptr_ty: Type,
field_index: u32,
- pt: Zcu.PerThread,
+ zcu: *Zcu,
) union(enum) {
begin: void,
field: CValue,
byte_offset: u64,
} {
- const zcu = pt.zcu;
const ip = &zcu.intern_pool;
const container_ty: Type = .fromInterned(ip.indexToKey(container_ptr_ty.toIntern()).ptr_type.child);
switch (ip.indexToKey(container_ty.toIntern())) {
@@ -5994,7 +5994,7 @@ fn fieldLocation(
else
.{ .field = field_index } },
.@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
- .{ .byte_offset = @divExact(pt.structPackedFieldBitOffset(loaded_struct, field_index) +
+ .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }
else
.begin,
@@ -6076,7 +6076,7 @@ fn airFieldParentPtr(f: *Function, inst: Air.Inst.Index) !CValue {
try f.renderType(w, container_ptr_ty);
try w.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, pt)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, extra.field_index, zcu)) {
.begin => try f.writeCValue(w, field_ptr_val, .Other),
.field => |field| {
const u8_ptr_ty = try pt.adjustPtrTypeChild(field_ptr_ty, .u8);
@@ -6131,7 +6131,7 @@ fn fieldPtr(
try f.renderType(w, field_ptr_ty);
try w.writeByte(')');
- switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, pt)) {
+ switch (fieldLocation(container_ptr_ty, field_ptr_ty, field_index, zcu)) {
.begin => try f.writeCValue(w, container_ptr_val, .Other),
.field => |field| {
try w.writeByte('&');
@@ -6189,7 +6189,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
- const bit_offset = pt.structPackedFieldBitOffset(loaded_struct, extra.field_index);
+ const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
const field_int_signedness = if (inst_ty.isAbiInt(zcu))
inst_ty.intInfo(zcu).signedness
@@ -8573,8 +8573,7 @@ const Vectorize = struct {
}
};
-fn lowersToArray(ty: Type, pt: Zcu.PerThread) bool {
- const zcu = pt.zcu;
+fn lowersToArray(ty: Type, zcu: *Zcu) bool {
return switch (ty.zigTypeTag(zcu)) {
.array, .vector => return true,
else => return ty.isAbiInt(zcu) and toCIntBits(@as(u32, @intCast(ty.bitSize(zcu)))) == null,
src/codegen/llvm.zig
@@ -20,6 +20,7 @@ const Package = @import("../Package.zig");
const Air = @import("../Air.zig");
const Value = @import("../Value.zig");
const Type = @import("../Type.zig");
+const codegen = @import("../codegen.zig");
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("wasm/abi.zig");
const aarch64_c_abi = @import("aarch64/abi.zig");
@@ -1131,7 +1132,7 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) !void {
const zcu = pt.zcu;
const comp = zcu.comp;
@@ -1489,7 +1490,7 @@ pub const Object = struct {
var fg: FuncGen = .{
.gpa = gpa,
.air = air.*,
- .liveness = liveness.*,
+ .liveness = liveness.*.?,
.ng = &ng,
.wip = wip,
.is_naked = fn_info.cc == .naked,
@@ -4210,7 +4211,7 @@ pub const Object = struct {
.eu_payload => |eu_ptr| try o.lowerPtr(
pt,
eu_ptr,
- offset + @import("../codegen.zig").errUnionPayloadOffset(
+ offset + codegen.errUnionPayloadOffset(
Value.fromInterned(eu_ptr).typeOf(zcu).childType(zcu),
zcu,
),
@@ -6969,7 +6970,7 @@ pub const FuncGen = struct {
.@"struct" => switch (struct_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_type = zcu.typeToStruct(struct_ty).?;
- const bit_offset = pt.structPackedFieldBitOffset(struct_type, field_index);
+ const bit_offset = zcu.structPackedFieldBitOffset(struct_type, field_index);
const containing_int = struct_llvm_val;
const shift_amt =
try o.builder.intValue(containing_int.typeOfWip(&self.wip), bit_offset);
@@ -11364,7 +11365,7 @@ pub const FuncGen = struct {
// We have a pointer to a packed struct field that happens to be byte-aligned.
// Offset our operand pointer by the correct number of bytes.
- const byte_offset = @divExact(pt.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
+ const byte_offset = @divExact(zcu.structPackedFieldBitOffset(struct_type, field_index) + struct_ptr_ty_info.packed_offset.bit_offset, 8);
if (byte_offset == 0) return struct_ptr;
const usize_ty = try o.lowerType(pt, Type.usize);
const llvm_index = try o.builder.intValue(usize_ty, byte_offset);
src/codegen/spirv.zig
@@ -251,11 +251,11 @@ pub const Object = struct {
pt: Zcu.PerThread,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) !void {
const nav = pt.zcu.funcInfo(func_index).owner_nav;
// TODO: Separate types for generating decls and functions?
- try self.genNav(pt, nav, air.*, liveness.*, true);
+ try self.genNav(pt, nav, air.*, liveness.*.?, true);
}
pub fn updateNav(
@@ -5134,7 +5134,7 @@ const NavGen = struct {
.@"struct" => switch (object_ty.containerLayout(zcu)) {
.@"packed" => {
const struct_ty = zcu.typeToPackedStruct(object_ty).?;
- const bit_offset = pt.structPackedFieldBitOffset(struct_ty, field_index);
+ const bit_offset = zcu.structPackedFieldBitOffset(struct_ty, field_index);
const bit_offset_id = try self.constInt(.u16, bit_offset);
const signedness = if (field_ty.isInt(zcu)) field_ty.intInfo(zcu).signedness else .unsigned;
const field_bit_size: u16 = @intCast(field_ty.bitSize(zcu));
src/link/Elf/Atom.zig
@@ -1627,7 +1627,7 @@ const aarch64 = struct {
const S_ = th.targetAddress(target_index, elf_file);
break :blk math.cast(i28, S_ + A - P) orelse return error.Overflow;
};
- aarch64_util.writeBranchImm(disp, code);
+ util.writeBranchImm(disp, code);
},
.PREL32 => {
@@ -1640,15 +1640,18 @@ const aarch64 = struct {
mem.writeInt(u64, code_buffer[r_offset..][0..8], @bitCast(value), .little);
},
+ .ADR_PREL_LO21 => {
+ const value = math.cast(i21, S + A - P) orelse return error.Overflow;
+ util.writeAdrInst(value, code);
+ },
+
.ADR_PREL_PG_HI21 => {
// TODO: check for relaxation of ADRP+ADD
- const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, S + A)));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S + A), code);
},
.ADR_GOT_PAGE => if (target.flags.has_got) {
- const pages = @as(u21, @bitCast(try aarch64_util.calcNumberOfPages(P, G + GOT + A)));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, G + GOT + A), code);
} else {
// TODO: relax
var err = try diags.addErrorWithNotes(1);
@@ -1663,12 +1666,12 @@ const aarch64 = struct {
.LD64_GOT_LO12_NC => {
assert(target.flags.has_got);
const taddr = @as(u64, @intCast(G + GOT + A));
- aarch64_util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
+ util.writeLoadStoreRegInst(@divExact(@as(u12, @truncate(taddr)), 8), code);
},
.ADD_ABS_LO12_NC => {
const taddr = @as(u64, @intCast(S + A));
- aarch64_util.writeAddImmInst(@truncate(taddr), code);
+ util.writeAddImmInst(@truncate(taddr), code);
},
.LDST8_ABS_LO12_NC,
@@ -1687,57 +1690,54 @@ const aarch64 = struct {
.LDST128_ABS_LO12_NC => @divExact(@as(u12, @truncate(taddr)), 16),
else => unreachable,
};
- aarch64_util.writeLoadStoreRegInst(off, code);
+ util.writeLoadStoreRegInst(off, code);
},
.TLSLE_ADD_TPREL_HI12 => {
const value = math.cast(i12, (S + A - TP) >> 12) orelse
return error.Overflow;
- aarch64_util.writeAddImmInst(@bitCast(value), code);
+ util.writeAddImmInst(@bitCast(value), code);
},
.TLSLE_ADD_TPREL_LO12_NC => {
const value: i12 = @truncate(S + A - TP);
- aarch64_util.writeAddImmInst(@bitCast(value), code);
+ util.writeAddImmInst(@bitCast(value), code);
},
.TLSIE_ADR_GOTTPREL_PAGE21 => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSIE_LD64_GOTTPREL_LO12_NC => {
const S_ = target.gotTpAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
- aarch64_util.writeLoadStoreRegInst(off, code);
+ util.writeLoadStoreRegInst(off, code);
},
.TLSGD_ADR_PAGE21 => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
},
.TLSGD_ADD_LO12_NC => {
const S_ = target.tlsGdAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
- aarch64_util.writeAddImmInst(off, code);
+ util.writeAddImmInst(off, code);
},
.TLSDESC_ADR_PAGE21 => {
if (target.flags.has_tlsdesc) {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
- const pages: u21 = @bitCast(try aarch64_util.calcNumberOfPages(P, S_ + A));
- aarch64_util.writeAdrpInst(pages, code);
+ util.writeAdrInst(try util.calcNumberOfPages(P, S_ + A), code);
} else {
relocs_log.debug(" relaxing adrp => nop", .{});
- mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
+ util.encoding.Instruction.nop().write(code);
}
},
@@ -1746,10 +1746,10 @@ const aarch64 = struct {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = try math.divExact(u12, @truncate(@as(u64, @bitCast(S_ + A))), 8);
- aarch64_util.writeLoadStoreRegInst(off, code);
+ util.writeLoadStoreRegInst(off, code);
} else {
relocs_log.debug(" relaxing ldr => nop", .{});
- mem.writeInt(u32, code, Instruction.nop().toU32(), .little);
+ util.encoding.Instruction.nop().write(code);
}
},
@@ -1758,32 +1758,18 @@ const aarch64 = struct {
const S_ = target.tlsDescAddress(elf_file);
relocs_log.debug(" [{x} => {x}]", .{ P, S_ + A });
const off: u12 = @truncate(@as(u64, @bitCast(S_ + A)));
- aarch64_util.writeAddImmInst(off, code);
+ util.writeAddImmInst(off, code);
} else {
- const old_inst: Instruction = .{
- .add_subtract_immediate = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.add_subtract_immediate),
- ), code),
- };
- const rd: Register = @enumFromInt(old_inst.add_subtract_immediate.rd);
- relocs_log.debug(" relaxing add({s}) => movz(x0, {x})", .{ @tagName(rd), S + A - TP });
+ relocs_log.debug(" relaxing add => movz(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(math.cast(i16, (S + A - TP) >> 16) orelse return error.Overflow);
- mem.writeInt(u32, code, Instruction.movz(.x0, value, 16).toU32(), .little);
+ util.encoding.Instruction.movz(.x0, value, .{ .lsl = .@"16" }).write(code);
}
},
.TLSDESC_CALL => if (!target.flags.has_tlsdesc) {
- const old_inst: Instruction = .{
- .unconditional_branch_register = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.unconditional_branch_register),
- ), code),
- };
- const rn: Register = @enumFromInt(old_inst.unconditional_branch_register.rn);
- relocs_log.debug(" relaxing br({s}) => movk(x0, {x})", .{ @tagName(rn), S + A - TP });
+ relocs_log.debug(" relaxing br => movk(x0, {x})", .{S + A - TP});
const value: u16 = @bitCast(@as(i16, @truncate(S + A - TP)));
- mem.writeInt(u32, code, Instruction.movk(.x0, value, 0).toU32(), .little);
+ util.encoding.Instruction.movk(.x0, value, .{}).write(code);
},
else => try atom.reportUnhandledRelocError(rel, elf_file),
@@ -1819,9 +1805,7 @@ const aarch64 = struct {
}
}
- const aarch64_util = @import("../aarch64.zig");
- const Instruction = aarch64_util.Instruction;
- const Register = aarch64_util.Register;
+ const util = @import("../aarch64.zig");
};
const riscv = struct {
src/link/Elf/relocation.zig
@@ -94,14 +94,18 @@ pub fn encode(comptime kind: Kind, cpu_arch: std.Target.Cpu.Arch) u32 {
pub const dwarf = struct {
pub fn crossSectionRelocType(format: DW.Format, cpu_arch: std.Target.Cpu.Arch) u32 {
return switch (cpu_arch) {
- .x86_64 => @intFromEnum(switch (format) {
- .@"32" => elf.R_X86_64.@"32",
+ .x86_64 => @intFromEnum(@as(elf.R_X86_64, switch (format) {
+ .@"32" => .@"32",
.@"64" => .@"64",
- }),
- .riscv64 => @intFromEnum(switch (format) {
- .@"32" => elf.R_RISCV.@"32",
+ })),
+ .aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (format) {
+ .@"32" => .ABS32,
+ .@"64" => .ABS64,
+ })),
+ .riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (format) {
+ .@"32" => .@"32",
.@"64" => .@"64",
- }),
+ })),
else => @panic("TODO unhandled cpu arch"),
};
}
@@ -121,6 +125,14 @@ pub const dwarf = struct {
},
.debug_frame => .PC32,
})),
+ .aarch64 => @intFromEnum(@as(elf.R_AARCH64, switch (source_section) {
+ else => switch (address_size) {
+ .@"32" => .ABS32,
+ .@"64" => .ABS64,
+ else => unreachable,
+ },
+ .debug_frame => .PREL32,
+ })),
.riscv64 => @intFromEnum(@as(elf.R_RISCV, switch (source_section) {
else => switch (address_size) {
.@"32" => .@"32",
src/link/Elf/synthetic_sections.zig
@@ -810,54 +810,43 @@ pub const PltSection = struct {
const got_plt_addr: i64 = @intCast(shdrs[elf_file.section_indexes.got_plt.?].sh_addr);
// TODO: relax if possible
// .got.plt[2]
- const pages = try aarch64_util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
- const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(got_plt_addr + 16))), 8);
+ const pages = try util.calcNumberOfPages(plt_addr + 4, got_plt_addr + 16);
+ const ldr_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
const add_off: u12 = @truncate(@as(u64, @bitCast(got_plt_addr + 16)));
- const preamble = &[_]Instruction{
- Instruction.stp(
- .x16,
- .x30,
- Register.sp,
- Instruction.LoadStorePairOffset.pre_index(-16),
- ),
- Instruction.adrp(.x16, pages),
- Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
- Instruction.add(.x16, .x16, add_off, false),
- Instruction.br(.x17),
- Instruction.nop(),
- Instruction.nop(),
- Instruction.nop(),
+ const preamble = [_]util.encoding.Instruction{
+ .stp(.x16, .x30, .{ .pre_index = .{ .base = .sp, .index = -16 } }),
+ .adrp(.x16, pages << 12),
+ .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = ldr_off } }),
+ .add(.x16, .x16, .{ .immediate = add_off }),
+ .br(.x17),
+ .nop(),
+ .nop(),
+ .nop(),
};
comptime assert(preamble.len == 8);
- for (preamble) |inst| {
- try writer.writeInt(u32, inst.toU32(), .little);
- }
+ for (preamble) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
for (plt.symbols.items) |ref| {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotPltAddress(elf_file);
const source_addr = sym.pltAddress(elf_file);
- const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
- const ldr_off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
+ const pages = try util.calcNumberOfPages(source_addr, target_addr);
+ const ldr_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
const add_off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
- const insts = &[_]Instruction{
- Instruction.adrp(.x16, pages),
- Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(ldr_off)),
- Instruction.add(.x16, .x16, add_off, false),
- Instruction.br(.x17),
+ const insts = [_]util.encoding.Instruction{
+ .adrp(.x16, pages << 12),
+ .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = ldr_off } }),
+ .add(.x16, .x16, .{ .immediate = add_off }),
+ .br(.x17),
};
comptime assert(insts.len == 4);
- for (insts) |inst| {
- try writer.writeInt(u32, inst.toU32(), .little);
- }
+ for (insts) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
}
- const aarch64_util = @import("../aarch64.zig");
- const Instruction = aarch64_util.Instruction;
- const Register = aarch64_util.Register;
+ const util = @import("../aarch64.zig");
};
};
@@ -979,24 +968,20 @@ pub const PltGotSection = struct {
const sym = elf_file.symbol(ref).?;
const target_addr = sym.gotAddress(elf_file);
const source_addr = sym.pltGotAddress(elf_file);
- const pages = try aarch64_util.calcNumberOfPages(source_addr, target_addr);
- const off = try math.divExact(u12, @truncate(@as(u64, @bitCast(target_addr))), 8);
- const insts = &[_]Instruction{
- Instruction.adrp(.x16, pages),
- Instruction.ldr(.x17, .x16, Instruction.LoadStoreOffset.imm(off)),
- Instruction.br(.x17),
- Instruction.nop(),
+ const pages = try util.calcNumberOfPages(source_addr, target_addr);
+ const off: u12 = @truncate(@as(u64, @bitCast(target_addr)));
+ const insts = [_]util.encoding.Instruction{
+ .adrp(.x16, pages << 12),
+ .ldr(.x17, .{ .unsigned_offset = .{ .base = .x16, .offset = off } }),
+ .br(.x17),
+ .nop(),
};
comptime assert(insts.len == 4);
- for (insts) |inst| {
- try writer.writeInt(u32, inst.toU32(), .little);
- }
+ for (insts) |inst| try writer.writeInt(util.encoding.Instruction.Backing, @bitCast(inst), .little);
}
}
- const aarch64_util = @import("../aarch64.zig");
- const Instruction = aarch64_util.Instruction;
- const Register = aarch64_util.Register;
+ const util = @import("../aarch64.zig");
};
};
src/link/Elf/Thunk.zig
@@ -95,18 +95,21 @@ const aarch64 = struct {
const sym = elf_file.symbol(ref).?;
const saddr = thunk.address(elf_file) + @as(i64, @intCast(i * trampoline_size));
const taddr = sym.address(.{}, elf_file);
- const pages = try util.calcNumberOfPages(saddr, taddr);
- try writer.writeInt(u32, Instruction.adrp(.x16, pages).toU32(), .little);
- const off: u12 = @truncate(@as(u64, @bitCast(taddr)));
- try writer.writeInt(u32, Instruction.add(.x16, .x16, off, false).toU32(), .little);
- try writer.writeInt(u32, Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(
+ util.encoding.Instruction.adrp(.x16, try util.calcNumberOfPages(saddr, taddr) << 12),
+ ), .little);
+ try writer.writeInt(u32, @bitCast(util.encoding.Instruction.add(
+ .x16,
+ .x16,
+ .{ .immediate = @truncate(@as(u64, @bitCast(taddr))) },
+ )), .little);
+ try writer.writeInt(u32, @bitCast(util.encoding.Instruction.br(.x16)), .little);
}
}
const trampoline_size = 3 * @sizeOf(u32);
const util = @import("../aarch64.zig");
- const Instruction = util.Instruction;
};
const assert = std.debug.assert;
src/link/Elf/ZigObject.zig
@@ -1270,9 +1270,13 @@ fn updateNavCode(
log.debug("updateNavCode {f}({d})", .{ nav.fqn.fmt(ip), nav_index });
- const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- const required_alignment = switch (pt.navAlignment(nav_index)) {
- .none => target_util.defaultFunctionAlignment(target),
+ const mod = zcu.navFileScope(nav_index).mod.?;
+ const target = &mod.resolved_target.result;
+ const required_alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
src/link/MachO/Atom.zig
@@ -780,8 +780,7 @@ fn resolveRelocInner(
};
break :target math.cast(u64, target) orelse return error.Overflow;
};
- const pages = @as(u21, @bitCast(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target))));
- aarch64.writeAdrpInst(pages, code[rel_offset..][0..4]);
+ aarch64.writeAdrInst(try aarch64.calcNumberOfPages(@intCast(source), @intCast(target)), code[rel_offset..][0..aarch64.encoding.Instruction.size]);
},
.pageoff => {
@@ -789,26 +788,18 @@ fn resolveRelocInner(
assert(rel.meta.length == 2);
assert(!rel.meta.pcrel);
const target = math.cast(u64, S + A) orelse return error.Overflow;
- const inst_code = code[rel_offset..][0..4];
- if (aarch64.isArithmeticOp(inst_code)) {
- aarch64.writeAddImmInst(@truncate(target), inst_code);
- } else {
- var inst = aarch64.Instruction{
- .load_store_register = mem.bytesToValue(@FieldType(
- aarch64.Instruction,
- @tagName(aarch64.Instruction.load_store_register),
- ), inst_code),
- };
- inst.load_store_register.offset = switch (inst.load_store_register.size) {
- 0 => if (inst.load_store_register.v == 1)
- try divExact(self, rel, @truncate(target), 16, macho_file)
- else
- @truncate(target),
- 1 => try divExact(self, rel, @truncate(target), 2, macho_file),
- 2 => try divExact(self, rel, @truncate(target), 4, macho_file),
- 3 => try divExact(self, rel, @truncate(target), 8, macho_file),
- };
- try writer.writeInt(u32, inst.toU32(), .little);
+ const inst_code = code[rel_offset..][0..aarch64.encoding.Instruction.size];
+ var inst: aarch64.encoding.Instruction = .read(inst_code);
+ switch (inst.decode()) {
+ else => unreachable,
+ .data_processing_immediate => aarch64.writeAddImmInst(@truncate(target), inst_code),
+ .load_store => |load_store| {
+ inst.load_store.register_unsigned_immediate.group.imm12 = switch (load_store.register_unsigned_immediate.decode()) {
+ .integer => |integer| try divExact(self, rel, @truncate(target), @as(u4, 1) << @intFromEnum(integer.group.size), macho_file),
+ .vector => |vector| try divExact(self, rel, @truncate(target), @as(u5, 1) << @intFromEnum(vector.group.opc1.decode(vector.group.size)), macho_file),
+ };
+ try writer.writeInt(u32, @bitCast(inst), .little);
+ },
}
},
@@ -834,59 +825,26 @@ fn resolveRelocInner(
break :target math.cast(u64, target) orelse return error.Overflow;
};
- const RegInfo = struct {
- rd: u5,
- rn: u5,
- size: u2,
- };
-
const inst_code = code[rel_offset..][0..4];
- const reg_info: RegInfo = blk: {
- if (aarch64.isArithmeticOp(inst_code)) {
- const inst = mem.bytesToValue(@FieldType(
- aarch64.Instruction,
- @tagName(aarch64.Instruction.add_subtract_immediate),
- ), inst_code);
- break :blk .{
- .rd = inst.rd,
- .rn = inst.rn,
- .size = inst.sf,
- };
- } else {
- const inst = mem.bytesToValue(@FieldType(
- aarch64.Instruction,
- @tagName(aarch64.Instruction.load_store_register),
- ), inst_code);
- break :blk .{
- .rd = inst.rt,
- .rn = inst.rn,
- .size = inst.size,
- };
- }
- };
-
- var inst = if (sym.getSectionFlags().tlv_ptr) aarch64.Instruction{
- .load_store_register = .{
- .rt = reg_info.rd,
- .rn = reg_info.rn,
- .offset = try divExact(self, rel, @truncate(target), 8, macho_file),
- .opc = 0b01,
- .op1 = 0b01,
- .v = 0,
- .size = reg_info.size,
+ const rd, const rn = switch (aarch64.encoding.Instruction.read(inst_code).decode()) {
+ else => unreachable,
+ .data_processing_immediate => |decoded| .{
+ decoded.add_subtract_immediate.group.Rd.decodeInteger(.doubleword, .{ .sp = true }),
+ decoded.add_subtract_immediate.group.Rn.decodeInteger(.doubleword, .{ .sp = true }),
},
- } else aarch64.Instruction{
- .add_subtract_immediate = .{
- .rd = reg_info.rd,
- .rn = reg_info.rn,
- .imm12 = @truncate(target),
- .sh = 0,
- .s = 0,
- .op = 0,
- .sf = @as(u1, @truncate(reg_info.size)),
+ .load_store => |decoded| .{
+ decoded.register_unsigned_immediate.integer.group.Rt.decodeInteger(.doubleword, .{}),
+ decoded.register_unsigned_immediate.group.Rn.decodeInteger(.doubleword, .{ .sp = true }),
},
};
- try writer.writeInt(u32, inst.toU32(), .little);
+
+ try writer.writeInt(u32, @bitCast(@as(
+ aarch64.encoding.Instruction,
+ if (sym.getSectionFlags().tlv_ptr) .ldr(rd, .{ .unsigned_offset = .{
+ .base = rn,
+ .offset = try divExact(self, rel, @truncate(target), 8, macho_file) * 8,
+ } }) else .add(rd, rn, .{ .immediate = @truncate(target) }),
+ )), .little);
},
}
}
src/link/MachO/synthetic.zig
@@ -105,16 +105,15 @@ pub const StubsSection = struct {
try writer.writeInt(i32, @intCast(target - source - 2 - 4), .little);
},
.aarch64 => {
+ const Instruction = aarch64.encoding.Instruction;
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(target), 8);
- try writer.writeInt(
- u32,
- aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
- .little,
- );
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.ldr(
+ .x16,
+ .{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(target)) } },
+ )), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
},
else => unreachable,
}
@@ -201,18 +200,16 @@ pub const StubsHelperSection = struct {
try writer.writeInt(i32, @intCast(target - source - 6 - 4), .little);
},
.aarch64 => {
- const literal = blk: {
- const div_res = try std.math.divExact(u64, entry_size - @sizeOf(u32), 4);
- break :blk std.math.cast(u18, div_res) orelse return error.Overflow;
- };
- try writer.writeInt(u32, aarch64.Instruction.ldrLiteral(
- .w16,
- literal,
- ).toU32(), .little);
+ const Instruction = aarch64.encoding.Instruction;
+ if (entry_size % Instruction.size != 0) return error.UnexpectedRemainder;
+ try writer.writeInt(u32, @bitCast(
+ Instruction.ldr(.w16, .{ .literal = std.math.cast(i21, entry_size - Instruction.size) orelse
+ return error.Overflow }),
+ ), .little);
const disp = math.cast(i28, @as(i64, @intCast(target)) - @as(i64, @intCast(source + 4))) orelse
return error.Overflow;
- try writer.writeInt(u32, aarch64.Instruction.b(disp).toU32(), .little);
- try writer.writeAll(&.{ 0x0, 0x0, 0x0, 0x0 });
+ try writer.writeInt(u32, @bitCast(Instruction.b(disp)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.udf(0x0)), .little);
},
else => unreachable,
}
@@ -242,31 +239,28 @@ pub const StubsHelperSection = struct {
try writer.writeByte(0x90);
},
.aarch64 => {
+ const Instruction = aarch64.encoding.Instruction;
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr), @intCast(dyld_private_addr));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x17, pages).toU32(), .little);
- const off: u12 = @truncate(dyld_private_addr);
- try writer.writeInt(u32, aarch64.Instruction.add(.x17, .x17, off, false).toU32(), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.adrp(.x17, pages << 12)), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(
+ Instruction.add(.x17, .x17, .{ .immediate = @as(u12, @truncate(dyld_private_addr)) }),
+ ), .little);
}
- try writer.writeInt(u32, aarch64.Instruction.stp(
- .x16,
- .x17,
- aarch64.Register.sp,
- aarch64.Instruction.LoadStorePairOffset.pre_index(-16),
- ).toU32(), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(
+ Instruction.stp(.x16, .x17, .{ .pre_index = .{ .base = .sp, .index = -16 } }),
+ ), .little);
{
// TODO relax if possible
const pages = try aarch64.calcNumberOfPages(@intCast(sect.addr + 12), @intCast(dyld_stub_binder_addr));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(dyld_stub_binder_addr), 8);
- try writer.writeInt(u32, aarch64.Instruction.ldr(
- .x16,
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.ldr(
.x16,
- aarch64.Instruction.LoadStoreOffset.imm(off),
- ).toU32(), .little);
+ .{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(dyld_stub_binder_addr)) } },
+ )), .little);
}
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(Instruction.Backing, @bitCast(Instruction.br(.x16)), .little);
},
else => unreachable,
}
@@ -426,35 +420,32 @@ pub const ObjcStubsSection = struct {
}
},
.aarch64 => {
+ const Instruction = aarch64.encoding.Instruction;
{
const target = sym.getObjcSelrefsAddress(macho_file);
const source = addr;
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x1, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(target), 8);
- try writer.writeInt(
- u32,
- aarch64.Instruction.ldr(.x1, .x1, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
- .little,
- );
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x1, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.ldr(
+ .x1,
+ .{ .unsigned_offset = .{ .base = .x1, .offset = @as(u12, @truncate(target)) } },
+ )), .little);
}
{
const target_sym = obj.getObjcMsgSendRef(macho_file).?.getSymbol(macho_file).?;
const target = target_sym.getGotAddress(macho_file);
const source = addr + 2 * @sizeOf(u32);
const pages = try aarch64.calcNumberOfPages(@intCast(source), @intCast(target));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off = try math.divExact(u12, @truncate(target), 8);
- try writer.writeInt(
- u32,
- aarch64.Instruction.ldr(.x16, .x16, aarch64.Instruction.LoadStoreOffset.imm(off)).toU32(),
- .little,
- );
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.ldr(
+ .x16,
+ .{ .unsigned_offset = .{ .base = .x16, .offset = @as(u12, @truncate(target)) } },
+ )), .little);
}
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.brk(1).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.brk(0x1)), .little);
},
else => unreachable,
}
src/link/MachO/Thunk.zig
@@ -21,15 +21,17 @@ pub fn getTargetAddress(thunk: Thunk, ref: MachO.Ref, macho_file: *MachO) u64 {
}
pub fn write(thunk: Thunk, macho_file: *MachO, writer: anytype) !void {
+ const Instruction = aarch64.encoding.Instruction;
for (thunk.symbols.keys(), 0..) |ref, i| {
const sym = ref.getSymbol(macho_file).?;
const saddr = thunk.getAddress(macho_file) + i * trampoline_size;
const taddr = sym.getAddress(.{}, macho_file);
const pages = try aarch64.calcNumberOfPages(@intCast(saddr), @intCast(taddr));
- try writer.writeInt(u32, aarch64.Instruction.adrp(.x16, pages).toU32(), .little);
- const off: u12 = @truncate(taddr);
- try writer.writeInt(u32, aarch64.Instruction.add(.x16, .x16, off, false).toU32(), .little);
- try writer.writeInt(u32, aarch64.Instruction.br(.x16).toU32(), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.adrp(.x16, pages << 12)), .little);
+ try writer.writeInt(u32, @bitCast(
+ Instruction.add(.x16, .x16, .{ .immediate = @truncate(taddr) }),
+ ), .little);
+ try writer.writeInt(u32, @bitCast(Instruction.br(.x16)), .little);
}
}
src/link/MachO/ZigObject.zig
@@ -945,9 +945,13 @@ fn updateNavCode(
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- const required_alignment = switch (pt.navAlignment(nav_index)) {
- .none => target_util.defaultFunctionAlignment(target),
+ const mod = zcu.navFileScope(nav_index).mod.?;
+ const target = &mod.resolved_target.result;
+ const required_alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
src/link/aarch64.zig
@@ -1,66 +1,36 @@
-pub inline fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @as(u5, @truncate(inst[3]));
- return ((group_decode >> 2) == 4);
-}
+pub const encoding = @import("../codegen.zig").aarch64.encoding;
pub fn writeAddImmInst(value: u12, code: *[4]u8) void {
- var inst = Instruction{
- .add_subtract_immediate = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.add_subtract_immediate),
- ), code),
- };
- inst.add_subtract_immediate.imm12 = value;
- mem.writeInt(u32, code, inst.toU32(), .little);
+ var inst: encoding.Instruction = .read(code);
+ inst.data_processing_immediate.add_subtract_immediate.group.imm12 = value;
+ inst.write(code);
}
pub fn writeLoadStoreRegInst(value: u12, code: *[4]u8) void {
- var inst: Instruction = .{
- .load_store_register = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.load_store_register),
- ), code),
- };
- inst.load_store_register.offset = value;
- mem.writeInt(u32, code, inst.toU32(), .little);
+ var inst: encoding.Instruction = .read(code);
+ inst.load_store.register_unsigned_immediate.group.imm12 = value;
+ inst.write(code);
}
-pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i21 {
- const spage = math.cast(i32, saddr >> 12) orelse return error.Overflow;
- const tpage = math.cast(i32, taddr >> 12) orelse return error.Overflow;
- const pages = math.cast(i21, tpage - spage) orelse return error.Overflow;
- return pages;
+pub fn calcNumberOfPages(saddr: i64, taddr: i64) error{Overflow}!i33 {
+ return math.cast(i21, (taddr >> 12) - (saddr >> 12)) orelse error.Overflow;
}
-pub fn writeAdrpInst(pages: u21, code: *[4]u8) void {
- var inst = Instruction{
- .pc_relative_address = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.pc_relative_address),
- ), code),
- };
- inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
- inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
- mem.writeInt(u32, code, inst.toU32(), .little);
+pub fn writeAdrInst(imm: i33, code: *[4]u8) void {
+ var inst: encoding.Instruction = .read(code);
+ inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(imm >> 2);
+ inst.data_processing_immediate.pc_relative_addressing.group.immlo = @bitCast(@as(i2, @truncate(imm)));
+ inst.write(code);
}
pub fn writeBranchImm(disp: i28, code: *[4]u8) void {
- var inst = Instruction{
- .unconditional_branch_immediate = mem.bytesToValue(@FieldType(
- Instruction,
- @tagName(Instruction.unconditional_branch_immediate),
- ), code),
- };
- inst.unconditional_branch_immediate.imm26 = @as(u26, @truncate(@as(u28, @bitCast(disp >> 2))));
- mem.writeInt(u32, code, inst.toU32(), .little);
+ var inst: encoding.Instruction = .read(code);
+ inst.branch_exception_generating_system.unconditional_branch_immediate.group.imm26 = @intCast(@shrExact(disp, 2));
+ inst.write(code);
}
const assert = std.debug.assert;
-const bits = @import("../arch/aarch64/bits.zig");
const builtin = @import("builtin");
const math = std.math;
const mem = std.mem;
const std = @import("std");
-
-pub const Instruction = bits.Instruction;
-pub const Register = bits.Register;
src/link/Coff.zig
@@ -1335,9 +1335,13 @@ fn updateNavCode(
log.debug("updateNavCode {f} 0x{x}", .{ nav.fqn.fmt(ip), nav_index });
- const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
- const required_alignment = switch (pt.navAlignment(nav_index)) {
- .none => target_util.defaultFunctionAlignment(target),
+ const mod = zcu.navFileScope(nav_index).mod.?;
+ const target = &mod.resolved_target.result;
+ const required_alignment = switch (nav.status.fully_resolved.alignment) {
+ .none => switch (mod.optimize_mode) {
+ .Debug, .ReleaseSafe, .ReleaseFast => target_util.defaultFunctionAlignment(target),
+ .ReleaseSmall => target_util.minFunctionAlignment(target),
+ },
else => |a| a.maxStrict(target_util.minFunctionAlignment(target)),
};
@@ -2832,58 +2836,33 @@ pub const Relocation = struct {
};
fn resolveAarch64(reloc: Relocation, ctx: Context) void {
+ const Instruction = aarch64_util.encoding.Instruction;
var buffer = ctx.code[reloc.offset..];
switch (reloc.type) {
.got_page, .import_page, .page => {
const source_page = @as(i32, @intCast(ctx.source_vaddr >> 12));
const target_page = @as(i32, @intCast(ctx.target_vaddr >> 12));
- const pages = @as(u21, @bitCast(@as(i21, @intCast(target_page - source_page))));
- var inst = aarch64_util.Instruction{
- .pc_relative_address = mem.bytesToValue(@FieldType(
- aarch64_util.Instruction,
- @tagName(aarch64_util.Instruction.pc_relative_address),
- ), buffer[0..4]),
- };
- inst.pc_relative_address.immhi = @as(u19, @truncate(pages >> 2));
- inst.pc_relative_address.immlo = @as(u2, @truncate(pages));
- mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
+ const pages: i21 = @intCast(target_page - source_page);
+ var inst: Instruction = .read(buffer[0..Instruction.size]);
+ inst.data_processing_immediate.pc_relative_addressing.group.immhi = @intCast(pages >> 2);
+ inst.data_processing_immediate.pc_relative_addressing.group.immlo = @truncate(@as(u21, @bitCast(pages)));
+ inst.write(buffer[0..Instruction.size]);
},
.got_pageoff, .import_pageoff, .pageoff => {
assert(!reloc.pcrel);
- const narrowed = @as(u12, @truncate(@as(u64, @intCast(ctx.target_vaddr))));
- if (isArithmeticOp(buffer[0..4])) {
- var inst = aarch64_util.Instruction{
- .add_subtract_immediate = mem.bytesToValue(@FieldType(
- aarch64_util.Instruction,
- @tagName(aarch64_util.Instruction.add_subtract_immediate),
- ), buffer[0..4]),
- };
- inst.add_subtract_immediate.imm12 = narrowed;
- mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
- } else {
- var inst = aarch64_util.Instruction{
- .load_store_register = mem.bytesToValue(@FieldType(
- aarch64_util.Instruction,
- @tagName(aarch64_util.Instruction.load_store_register),
- ), buffer[0..4]),
- };
- const offset: u12 = blk: {
- if (inst.load_store_register.size == 0) {
- if (inst.load_store_register.v == 1) {
- // 128-bit SIMD is scaled by 16.
- break :blk @divExact(narrowed, 16);
- }
- // Otherwise, 8-bit SIMD or ldrb.
- break :blk narrowed;
- } else {
- const denom: u4 = math.powi(u4, 2, inst.load_store_register.size) catch unreachable;
- break :blk @divExact(narrowed, denom);
- }
- };
- inst.load_store_register.offset = offset;
- mem.writeInt(u32, buffer[0..4], inst.toU32(), .little);
+ const narrowed: u12 = @truncate(@as(u64, @intCast(ctx.target_vaddr)));
+ var inst: Instruction = .read(buffer[0..Instruction.size]);
+ switch (inst.decode()) {
+ else => unreachable,
+ .data_processing_immediate => inst.data_processing_immediate.add_subtract_immediate.group.imm12 = narrowed,
+ .load_store => |load_store| inst.load_store.register_unsigned_immediate.group.imm12 =
+ switch (load_store.register_unsigned_immediate.decode()) {
+ .integer => |integer| @shrExact(narrowed, @intFromEnum(integer.group.size)),
+ .vector => |vector| @shrExact(narrowed, @intFromEnum(vector.group.opc1.decode(vector.group.size))),
+ },
}
+ inst.write(buffer[0..Instruction.size]);
},
.direct => {
assert(!reloc.pcrel);
@@ -2934,11 +2913,6 @@ pub const Relocation = struct {
},
}
}
-
- fn isArithmeticOp(inst: *const [4]u8) bool {
- const group_decode = @as(u5, @truncate(inst[3]));
- return ((group_decode >> 2) == 4);
- }
};
pub fn addRelocation(coff: *Coff, atom_index: Atom.Index, reloc: Relocation) !void {
@@ -3112,7 +3086,7 @@ const Path = std.Build.Cache.Path;
const Directory = std.Build.Cache.Directory;
const Cache = std.Build.Cache;
-const aarch64_util = @import("../arch/aarch64/bits.zig");
+const aarch64_util = link.aarch64;
const allocPrint = std.fmt.allocPrint;
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
src/link/Dwarf.zig
@@ -2487,7 +2487,13 @@ fn initWipNavInner(
try wip_nav.strp(nav.fqn.toSlice(ip));
const ty: Type = nav_val.typeOf(zcu);
const addr: Loc = .{ .addr_reloc = sym_index };
- const loc: Loc = if (decl.is_threadlocal) .{ .form_tls_address = &addr } else addr;
+ const loc: Loc = if (decl.is_threadlocal) loc: {
+ const target = zcu.comp.root_mod.resolved_target.result;
+ break :loc switch (target.cpu.arch) {
+ .x86_64 => .{ .form_tls_address = &addr },
+ else => .empty,
+ };
+ } else addr;
switch (decl.kind) {
.unnamed_test, .@"test", .decltest, .@"comptime" => unreachable,
.@"const" => {
src/link/MachO.zig
@@ -328,6 +328,7 @@ pub fn deinit(self: *MachO) void {
self.unwind_info.deinit(gpa);
self.data_in_code.deinit(gpa);
+ for (self.thunks.items) |*thunk| thunk.deinit(gpa);
self.thunks.deinit(gpa);
}
@@ -5373,7 +5374,7 @@ const mem = std.mem;
const meta = std.meta;
const Writer = std.io.Writer;
-const aarch64 = @import("../arch/aarch64/bits.zig");
+const aarch64 = codegen.aarch64.encoding;
const bind = @import("MachO/dyld_info/bind.zig");
const calcUuid = @import("MachO/uuid.zig").calcUuid;
const codegen = @import("../codegen.zig");
src/Zcu/PerThread.zig
@@ -3737,30 +3737,6 @@ pub fn intBitsForValue(pt: Zcu.PerThread, val: Value, sign: bool) u16 {
}
}
-/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
-/// into the packed struct InternPool data rather than computing this on the
-/// fly, however it was found to perform worse when measured on real world
-/// projects.
-pub fn structPackedFieldBitOffset(
- pt: Zcu.PerThread,
- struct_type: InternPool.LoadedStructType,
- field_index: u32,
-) u16 {
- const zcu = pt.zcu;
- const ip = &zcu.intern_pool;
- assert(struct_type.layout == .@"packed");
- assert(struct_type.haveLayout(ip));
- var bit_sum: u64 = 0;
- for (0..struct_type.field_types.len) |i| {
- if (i == field_index) {
- return @intCast(bit_sum);
- }
- const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
- bit_sum += field_ty.bitSize(zcu);
- }
- unreachable; // index out of bounds
-}
-
pub fn navPtrType(pt: Zcu.PerThread, nav_id: InternPool.Nav.Index) Allocator.Error!Type {
const zcu = pt.zcu;
const ip = &zcu.intern_pool;
@@ -4381,8 +4357,11 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
try air.legalize(pt, features);
}
- var liveness: Air.Liveness = try .analyze(zcu, air.*, ip);
- defer liveness.deinit(gpa);
+ var liveness: ?Air.Liveness = if (codegen.wantsLiveness(pt, nav))
+ try .analyze(zcu, air.*, ip)
+ else
+ null;
+ defer if (liveness) |*l| l.deinit(gpa);
if (build_options.enable_debug_extensions and comp.verbose_air) {
const stderr = std.debug.lockStderrWriter(&.{});
@@ -4392,12 +4371,12 @@ fn runCodegenInner(pt: Zcu.PerThread, func_index: InternPool.Index, air: *Air) e
stderr.print("# End Function AIR: {f}\n\n", .{fqn.fmt(ip)}) catch {};
}
- if (std.debug.runtime_safety) {
+ if (std.debug.runtime_safety) verify_liveness: {
var verify: Air.Liveness.Verify = .{
.gpa = gpa,
.zcu = zcu,
.air = air.*,
- .liveness = liveness,
+ .liveness = liveness orelse break :verify_liveness,
.intern_pool = ip,
};
defer verify.deinit();
src/codegen.zig
@@ -22,6 +22,8 @@ const Zir = std.zig.Zir;
const Alignment = InternPool.Alignment;
const dev = @import("dev.zig");
+pub const aarch64 = @import("codegen/aarch64.zig");
+
pub const CodeGenError = GenerateSymbolError || error{
/// Indicates the error is already stored in Zcu `failed_codegen`.
CodegenFail,
@@ -48,7 +50,7 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
.other, .stage1 => unreachable,
- .stage2_aarch64 => unreachable,
+ .stage2_aarch64 => aarch64,
.stage2_arm => unreachable,
.stage2_c => @import("codegen/c.zig"),
.stage2_llvm => @import("codegen/llvm.zig"),
@@ -71,6 +73,7 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
.stage2_c,
.stage2_wasm,
.stage2_x86_64,
+ .stage2_aarch64,
.stage2_x86,
.stage2_riscv64,
.stage2_sparc64,
@@ -82,10 +85,20 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
}
}
+pub fn wantsLiveness(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) bool {
+ const zcu = pt.zcu;
+ const target = &zcu.navFileScope(nav_index).mod.?.resolved_target.result;
+ return switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
+ else => true,
+ .stage2_aarch64 => false,
+ };
+}
+
/// Every code generation backend has a different MIR representation. However, we want to pass
/// MIR from codegen to the linker *regardless* of which backend is in use. So, we use this: a
/// union of all MIR types. The active tag is known from the backend in use; see `AnyMir.tag`.
pub const AnyMir = union {
+ aarch64: @import("codegen/aarch64/Mir.zig"),
riscv64: @import("arch/riscv64/Mir.zig"),
sparc64: @import("arch/sparc64/Mir.zig"),
x86_64: @import("arch/x86_64/Mir.zig"),
@@ -95,7 +108,6 @@ pub const AnyMir = union {
pub inline fn tag(comptime backend: std.builtin.CompilerBackend) []const u8 {
return switch (backend) {
.stage2_aarch64 => "aarch64",
- .stage2_arm => "arm",
.stage2_riscv64 => "riscv64",
.stage2_sparc64 => "sparc64",
.stage2_x86_64 => "x86_64",
@@ -110,7 +122,8 @@ pub const AnyMir = union {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
switch (backend) {
else => unreachable,
- inline .stage2_riscv64,
+ inline .stage2_aarch64,
+ .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@@ -131,14 +144,15 @@ pub fn generateFunction(
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
- liveness: *const Air.Liveness,
+ liveness: *const ?Air.Liveness,
) CodeGenError!AnyMir {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
- inline .stage2_riscv64,
+ inline .stage2_aarch64,
+ .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@@ -173,7 +187,8 @@ pub fn emitFunction(
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
- inline .stage2_riscv64,
+ inline .stage2_aarch64,
+ .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
=> |backend| {
@@ -420,7 +435,7 @@ pub fn generateSymbol(
const int_tag_ty = ty.intTagType(zcu);
try generateSymbol(bin_file, pt, src_loc, try pt.getCoerced(Value.fromInterned(enum_tag.int), int_tag_ty), code, reloc_parent);
},
- .float => |float| switch (float.storage) {
+ .float => |float| storage: switch (float.storage) {
.f16 => |f16_val| writeFloat(f16, f16_val, target, endian, try code.addManyAsArray(gpa, 2)),
.f32 => |f32_val| writeFloat(f32, f32_val, target, endian, try code.addManyAsArray(gpa, 4)),
.f64 => |f64_val| writeFloat(f64, f64_val, target, endian, try code.addManyAsArray(gpa, 8)),
@@ -429,7 +444,13 @@ pub fn generateSymbol(
const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
try code.appendNTimes(gpa, 0, abi_size - 10);
},
- .f128 => |f128_val| writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
+ .f128 => |f128_val| switch (Type.fromInterned(float.ty).floatBits(target)) {
+ else => unreachable,
+ 16 => continue :storage .{ .f16 = @floatCast(f128_val) },
+ 32 => continue :storage .{ .f32 = @floatCast(f128_val) },
+ 64 => continue :storage .{ .f64 = @floatCast(f128_val) },
+ 128 => writeFloat(f128, f128_val, target, endian, try code.addManyAsArray(gpa, 16)),
+ },
},
.ptr => try lowerPtr(bin_file, pt, src_loc, val.toIntern(), code, reloc_parent, 0),
.slice => |slice| {
@@ -1218,3 +1239,17 @@ pub fn errUnionErrorOffset(payload_ty: Type, zcu: *Zcu) u64 {
return 0;
}
}
+
+pub fn fieldOffset(ptr_agg_ty: Type, ptr_field_ty: Type, field_index: u32, zcu: *Zcu) u64 {
+ const agg_ty = ptr_agg_ty.childType(zcu);
+ return switch (agg_ty.containerLayout(zcu)) {
+ .auto, .@"extern" => agg_ty.structFieldOffset(field_index, zcu),
+ .@"packed" => @divExact(@as(u64, ptr_agg_ty.ptrInfo(zcu).packed_offset.bit_offset) +
+ (if (zcu.typeToPackedStruct(agg_ty)) |loaded_struct| zcu.structPackedFieldBitOffset(loaded_struct, field_index) else 0) -
+ ptr_field_ty.ptrInfo(zcu).packed_offset.bit_offset, 8),
+ };
+}
+
+test {
+ _ = aarch64;
+}
src/Compilation.zig
@@ -1850,7 +1850,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
// approach, since the ubsan runtime uses quite a lot of the standard library
// and this reduces unnecessary bloat.
const ubsan_rt_strat: RtStrat = s: {
- const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target);
+ const can_build_ubsan_rt = target_util.canBuildLibUbsanRt(target, use_llvm, build_options.have_llvm);
const want_ubsan_rt = options.want_ubsan_rt orelse (can_build_ubsan_rt and any_sanitize_c == .full and is_exe_or_dyn_lib);
if (!want_ubsan_rt) break :s .none;
if (options.skip_linker_dependencies) break :s .none;
src/dev.zig
@@ -25,13 +25,13 @@ pub const Env = enum {
/// - `zig build-* -fno-emit-bin`
sema,
+ /// - sema
+ /// - `zig build-* -fincremental -fno-llvm -fno-lld -target aarch64-linux --listen=-`
+ @"aarch64-linux",
+
/// - `zig build-* -ofmt=c`
cbe,
- /// - sema
- /// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
- @"x86_64-linux",
-
/// - sema
/// - `zig build-* -fincremental -fno-llvm -fno-lld -target powerpc(64)(le)-linux --listen=-`
@"powerpc-linux",
@@ -48,6 +48,10 @@ pub const Env = enum {
/// - `zig build-* -fno-llvm -fno-lld -target wasm32-* --listen=-`
wasm,
+ /// - sema
+ /// - `zig build-* -fincremental -fno-llvm -fno-lld -target x86_64-linux --listen=-`
+ @"x86_64-linux",
+
pub inline fn supports(comptime dev_env: Env, comptime feature: Feature) bool {
return switch (dev_env) {
.full => true,
@@ -153,23 +157,22 @@ pub const Env = enum {
=> true,
else => Env.ast_gen.supports(feature),
},
- .cbe => switch (feature) {
- .legalize,
- .c_backend,
- .c_linker,
- => true,
- else => Env.sema.supports(feature),
- },
- .@"x86_64-linux" => switch (feature) {
+ .@"aarch64-linux" => switch (feature) {
.build_command,
.stdio_listen,
.incremental,
- .legalize,
- .x86_64_backend,
+ .aarch64_backend,
.elf_linker,
=> true,
else => Env.sema.supports(feature),
},
+ .cbe => switch (feature) {
+ .legalize,
+ .c_backend,
+ .c_linker,
+ => true,
+ else => Env.sema.supports(feature),
+ },
.@"powerpc-linux" => switch (feature) {
.build_command,
.stdio_listen,
@@ -199,6 +202,16 @@ pub const Env = enum {
=> true,
else => Env.sema.supports(feature),
},
+ .@"x86_64-linux" => switch (feature) {
+ .build_command,
+ .stdio_listen,
+ .incremental,
+ .legalize,
+ .x86_64_backend,
+ .elf_linker,
+ => true,
+ else => Env.sema.supports(feature),
+ },
};
}
src/InternPool.zig
@@ -7556,12 +7556,18 @@ fn extraFuncCoerced(ip: *const InternPool, extra: Local.Extra, extra_index: u32)
fn indexToKeyBigInt(ip: *const InternPool, tid: Zcu.PerThread.Id, limb_index: u32, positive: bool) Key {
const limbs_items = ip.getLocalShared(tid).getLimbs().view().items(.@"0");
const int: Int = @bitCast(limbs_items[limb_index..][0..Int.limbs_items_len].*);
+ const big_int: BigIntConst = .{
+ .limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
+ .positive = positive,
+ };
return .{ .int = .{
.ty = int.ty,
- .storage = .{ .big_int = .{
- .limbs = limbs_items[limb_index + Int.limbs_items_len ..][0..int.limbs_len],
- .positive = positive,
- } },
+ .storage = if (big_int.toInt(u64)) |x|
+ .{ .u64 = x }
+ else |_| if (big_int.toInt(i64)) |x|
+ .{ .i64 = x }
+ else |_|
+ .{ .big_int = big_int },
} };
}
src/link.zig
@@ -23,6 +23,7 @@ const dev = @import("dev.zig");
const target_util = @import("target.zig");
const codegen = @import("codegen.zig");
+pub const aarch64 = @import("link/aarch64.zig");
pub const LdScript = @import("link/LdScript.zig");
pub const Queue = @import("link/Queue.zig");
src/main.zig
@@ -37,6 +37,7 @@ const dev = @import("dev.zig");
test {
_ = Package;
+ _ = @import("codegen.zig");
}
const thread_stack_size = 60 << 20;
src/Sema.zig
@@ -16522,7 +16522,7 @@ fn zirAsm(
break :empty try sema.structInitEmpty(block, clobbers_ty, src, src);
} else try sema.resolveInst(extra.data.clobbers); // Already coerced by AstGen.
const clobbers_val = try sema.resolveConstDefinedValue(block, src, clobbers, .{ .simple = .clobber });
- needed_capacity += (asm_source.len + 3) / 4;
+ needed_capacity += asm_source.len / 4 + 1;
const gpa = sema.gpa;
try sema.air_extra.ensureUnusedCapacity(gpa, needed_capacity);
@@ -16562,7 +16562,8 @@ fn zirAsm(
{
const buffer = mem.sliceAsBytes(sema.air_extra.unusedCapacitySlice());
@memcpy(buffer[0..asm_source.len], asm_source);
- sema.air_extra.items.len += (asm_source.len + 3) / 4;
+ buffer[asm_source.len] = 0;
+ sema.air_extra.items.len += asm_source.len / 4 + 1;
}
return asm_air;
}
@@ -24846,7 +24847,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
},
.@"packed" => {
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
- (if (zcu.typeToStruct(parent_ty)) |struct_obj| pt.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
+ (if (zcu.typeToStruct(parent_ty)) |struct_obj| zcu.structPackedFieldBitOffset(struct_obj, field_index) else 0) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch
return sema.fail(block, inst_src, "pointer bit-offset mismatch", .{});
actual_parent_ptr_info.flags.alignment = actual_field_ptr_info.flags.alignment.minStrict(if (byte_offset > 0)
@@ -24873,7 +24874,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.Ins
// Logic lifted from type computation above - I'm just assuming it's correct.
// `catch unreachable` since error case handled above.
const byte_offset = std.math.divExact(u32, @abs(@as(i32, actual_parent_ptr_info.packed_offset.bit_offset) +
- pt.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
+ zcu.structPackedFieldBitOffset(zcu.typeToStruct(parent_ty).?, field_index) -
actual_field_ptr_info.packed_offset.bit_offset), 8) catch unreachable;
const parent_ptr_val = try sema.ptrSubtract(block, field_ptr_src, field_ptr_val, byte_offset, actual_parent_ptr_ty);
break :result Air.internedToRef(parent_ptr_val.toIntern());
src/target.zig
@@ -347,7 +347,7 @@ pub fn defaultCompilerRtOptimizeMode(target: *const std.Target) std.builtin.Opti
}
}
-pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llvm: bool) bool {
+pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, comptime have_llvm: bool) bool {
switch (target.os.tag) {
.plan9 => return false,
else => {},
@@ -359,6 +359,7 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
else => {},
}
return switch (zigBackend(target, use_llvm)) {
+ .stage2_aarch64 => true,
.stage2_llvm => true,
.stage2_x86_64 => switch (target.ofmt) {
.elf, .macho => true,
@@ -368,13 +369,21 @@ pub fn canBuildLibCompilerRt(target: *const std.Target, use_llvm: bool, have_llv
};
}
-pub fn canBuildLibUbsanRt(target: *const std.Target) bool {
+pub fn canBuildLibUbsanRt(target: *const std.Target, use_llvm: bool, comptime have_llvm: bool) bool {
switch (target.cpu.arch) {
.spirv32, .spirv64 => return false,
// Remove this once https://github.com/ziglang/zig/issues/23715 is fixed
.nvptx, .nvptx64 => return false,
- else => return true,
+ else => {},
}
+ return switch (zigBackend(target, use_llvm)) {
+ .stage2_llvm => true,
+ .stage2_x86_64 => switch (target.ofmt) {
+ .elf, .macho => true,
+ else => have_llvm,
+ },
+ else => have_llvm,
+ };
}
pub fn hasRedZone(target: *const std.Target) bool {
@@ -767,6 +776,7 @@ pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.Compiler
pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
return switch (backend) {
+ .stage2_aarch64 => false,
.stage2_powerpc => true,
.stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
else => true,
@@ -864,7 +874,7 @@ pub inline fn backendSupportsFeature(backend: std.builtin.CompilerBackend, compt
else => false,
},
.field_reordering => switch (backend) {
- .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
+ .stage2_aarch64, .stage2_c, .stage2_llvm, .stage2_x86_64 => true,
else => false,
},
.separate_thread => switch (backend) {
src/Type.zig
@@ -4166,7 +4166,7 @@ pub const generic_poison: Type = .{ .ip_index = .generic_poison_type };
pub fn smallestUnsignedBits(max: u64) u16 {
return switch (max) {
0 => 0,
- else => 1 + std.math.log2_int(u64, max),
+ else => @as(u16, 1) + std.math.log2_int(u64, max),
};
}
src/Zcu.zig
@@ -3891,6 +3891,29 @@ pub fn typeToPackedStruct(zcu: *const Zcu, ty: Type) ?InternPool.LoadedStructTyp
return s;
}
+/// https://github.com/ziglang/zig/issues/17178 explored storing these bit offsets
+/// into the packed struct InternPool data rather than computing this on the
+/// fly, however it was found to perform worse when measured on real world
+/// projects.
+pub fn structPackedFieldBitOffset(
+ zcu: *Zcu,
+ struct_type: InternPool.LoadedStructType,
+ field_index: u32,
+) u16 {
+ const ip = &zcu.intern_pool;
+ assert(struct_type.layout == .@"packed");
+ assert(struct_type.haveLayout(ip));
+ var bit_sum: u64 = 0;
+ for (0..struct_type.field_types.len) |i| {
+ if (i == field_index) {
+ return @intCast(bit_sum);
+ }
+ const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
+ bit_sum += field_ty.bitSize(zcu);
+ }
+ unreachable; // index out of bounds
+}
+
pub fn typeToUnion(zcu: *const Zcu, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &zcu.intern_pool;
@@ -4436,11 +4459,7 @@ pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enu
else => false,
},
.stage2_aarch64 => switch (cc) {
- .aarch64_aapcs,
- .aarch64_aapcs_darwin,
- .aarch64_aapcs_win,
- => |opts| opts.incoming_stack_alignment == null,
- .naked => true,
+ .aarch64_aapcs, .aarch64_aapcs_darwin, .naked => true,
else => false,
},
.stage2_x86 => switch (cc) {
test/behavior/abs.zig
@@ -3,7 +3,6 @@ const std = @import("std");
const expect = std.testing.expect;
test "@abs integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -50,7 +49,6 @@ fn testAbsIntegers() !void {
}
test "@abs unsigned integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -90,7 +88,6 @@ fn testAbsUnsignedIntegers() !void {
}
test "@abs big int <= 128 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -153,7 +150,6 @@ fn testAbsUnsignedBigInt() !void {
}
test "@abs floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -207,9 +203,9 @@ fn testAbsFloats(comptime T: type) !void {
}
test "@abs int vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -275,8 +271,8 @@ fn testAbsIntVectors(comptime len: comptime_int) !void {
}
test "@abs unsigned int vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -334,8 +330,8 @@ fn testAbsUnsignedIntVectors(comptime len: comptime_int) !void {
}
test "@abs float vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/align.zig
@@ -16,7 +16,6 @@ test "global variable alignment" {
}
test "large alignment of local constant" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@@ -25,7 +24,6 @@ test "large alignment of local constant" {
}
test "slicing array of length 1 can not assume runtime index is always zero" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@@ -74,7 +72,6 @@ test "alignment of struct with pointer has same alignment as usize" {
test "alignment and size of structs with 128-bit fields" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const A = struct {
@@ -160,7 +157,6 @@ test "alignment and size of structs with 128-bit fields" {
}
test "implicitly decreasing slice alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -173,7 +169,6 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -186,7 +181,6 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -205,7 +199,6 @@ fn sliceExpects4(slice: []align(4) u32) void {
test "return error union with 128-bit integer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -218,7 +211,6 @@ fn give() anyerror!u128 {
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -238,7 +230,6 @@ test "page aligned array on stack" {
}
test "function alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -268,7 +259,6 @@ test "function alignment" {
}
test "implicitly decreasing fn alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -292,7 +282,6 @@ fn alignedBig() align(16) i32 {
}
test "@alignCast functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -376,7 +365,6 @@ const DefaultAligned = struct {
test "read 128-bit field from default aligned struct in stack memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -396,7 +384,6 @@ var default_aligned_global = DefaultAligned{
test "read 128-bit field from default aligned struct in global memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -405,8 +392,8 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -426,7 +413,6 @@ test "struct field explicit alignment" {
}
test "align(N) on functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -455,7 +441,6 @@ test "comptime alloc alignment" {
// TODO: it's impossible to test this in Zig today, since comptime vars do not have runtime addresses.
if (true) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // flaky
@@ -468,7 +453,6 @@ test "comptime alloc alignment" {
}
test "@alignCast null" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -484,7 +468,6 @@ test "alignment of slice element" {
}
test "sub-aligned pointer field access" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -538,7 +521,6 @@ test "alignment of zero-bit types is respected" {
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/array.zig
@@ -19,7 +19,6 @@ test "array to slice" {
}
test "arrays" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -47,7 +46,6 @@ fn getArrayLen(a: []const u32) usize {
}
test "array concat with undefined" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -73,7 +71,6 @@ test "array concat with undefined" {
test "array concat with tuple" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array: [2]u8 = .{ 1, 2 };
@@ -89,7 +86,6 @@ test "array concat with tuple" {
test "array init with concat" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const a = 'a';
var i: [4]u8 = [2]u8{ a, 'b' } ++ [2]u8{ 'c', 'd' };
@@ -98,7 +94,6 @@ test "array init with concat" {
test "array init with mult" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = 'a';
@@ -110,7 +105,6 @@ test "array init with mult" {
}
test "array literal with explicit type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const hex_mult: [4]u16 = .{ 4096, 256, 16, 1 };
@@ -138,7 +132,6 @@ const ArrayDotLenConstExpr = struct {
const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -162,7 +155,6 @@ test "array len field" {
test "array with sentinels" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -200,7 +192,6 @@ test "void arrays" {
test "nested arrays of strings" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -215,7 +206,6 @@ test "nested arrays of strings" {
}
test "nested arrays of integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array_of_numbers = [_][2]u8{
@@ -230,7 +220,6 @@ test "nested arrays of integers" {
}
test "implicit comptime in array type size" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var arr: [plusOne(10)]bool = undefined;
@@ -243,7 +232,6 @@ fn plusOne(x: u32) u32 {
}
test "single-item pointer to array indexing and slicing" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -285,7 +273,6 @@ test "implicit cast zero sized array ptr to slice" {
}
test "anonymous list literal syntax" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -308,7 +295,6 @@ const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var s = Str{ .a = s_array[0..] };
@@ -323,7 +309,6 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -345,7 +330,6 @@ test "read/write through global variable array of struct fields initialized via
test "implicit cast single-item pointer" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testImplicitCastSingleItemPtr();
@@ -364,7 +348,6 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const arr = [_]u8{ 1, 2 };
@@ -376,7 +359,6 @@ test "comptime evaluating function that takes array by value" {
test "runtime initialize array elem and then implicit cast to slice" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var two: i32 = 2;
@@ -387,7 +369,6 @@ test "runtime initialize array elem and then implicit cast to slice" {
test "array literal as argument to function" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -414,8 +395,8 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -476,7 +457,6 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -502,7 +482,6 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -520,7 +499,6 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -540,7 +518,6 @@ test "type deduction for array subscript expression" {
test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -564,7 +541,7 @@ test "sentinel element count towards the ABI size calculation" {
}
test "zero-sized array with recursive type definition" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -587,8 +564,8 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -628,7 +605,6 @@ test "array with comptime-only element type" {
}
test "tuple to array handles sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -641,7 +617,6 @@ test "tuple to array handles sentinel" {
test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -675,8 +650,8 @@ test "runtime initialized sentinel-terminated array literal" {
}
test "array of array agregate init" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = [1]u32{11} ** 10;
@@ -725,7 +700,6 @@ test "array init with no result location has result type" {
}
test "slicing array of zero-sized values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -890,7 +864,6 @@ test "tuple initialized through reference to anonymous array init provides resul
test "copied array element doesn't alias source" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: [10][10]u32 = undefined;
@@ -945,7 +918,6 @@ test "array initialized with array with sentinel" {
}
test "store array of array of structs at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -970,7 +942,6 @@ test "store array of array of structs at comptime" {
}
test "accessing multidimensional global array at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -986,8 +957,8 @@ test "accessing multidimensional global array at comptime" {
}
test "union that needs padding bytes inside an array" {
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1023,7 +994,6 @@ test "runtime index of array of zero-bit values" {
}
test "@splat array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1046,7 +1016,6 @@ test "@splat array" {
test "@splat array with sentinel" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1070,7 +1039,6 @@ test "@splat array with sentinel" {
test "@splat zero-length array" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/asm.zig
@@ -7,7 +7,6 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux
comptime {
if (builtin.zig_backend != .stage2_arm and
- builtin.zig_backend != .stage2_aarch64 and
!(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly
is_x86_64_linux)
{
@@ -30,7 +29,6 @@ test "module level assembly" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
@@ -41,9 +39,9 @@ test "module level assembly" {
}
test "output constraint modifiers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -63,9 +61,9 @@ test "output constraint modifiers" {
}
test "alternative constraints" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -83,7 +81,6 @@ test "alternative constraints" {
test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -127,7 +124,6 @@ test "sized integer/float in asm input" {
test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -167,7 +163,6 @@ test "rw constraint (x86_64)" {
test "asm modifiers (AArch64)" {
if (!builtin.target.cpu.arch.isAARCH64()) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly
test/behavior/atomics.zig
@@ -12,7 +12,7 @@ const supports_128_bit_atomics = switch (builtin.cpu.arch) {
};
test "cmpxchg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -39,7 +39,7 @@ fn testCmpxchg() !void {
}
test "atomicrmw and atomicload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -68,7 +68,7 @@ fn testAtomicLoad(ptr: *u8) !void {
}
test "cmpxchg with ptr" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -94,7 +94,7 @@ test "cmpxchg with ptr" {
}
test "cmpxchg with ignored result" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -110,8 +110,8 @@ test "128-bit cmpxchg" {
// TODO: this must appear first
if (!supports_128_bit_atomics) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@@ -139,7 +139,7 @@ fn test_u128_cmpxchg() !void {
var a_global_variable = @as(u32, 1234);
test "cmpxchg on a global variable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -149,7 +149,7 @@ test "cmpxchg on a global variable" {
}
test "atomic load and rmw with enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -167,7 +167,7 @@ test "atomic load and rmw with enum" {
}
test "atomic store" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -185,7 +185,7 @@ fn testAtomicStore() !void {
}
test "atomicrmw with floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -211,7 +211,7 @@ fn testAtomicRmwFloat() !void {
}
test "atomicrmw with ints" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -290,7 +290,7 @@ test "atomicrmw with 128-bit ints" {
// TODO: this must appear first
if (!supports_128_bit_atomics) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try testAtomicRmwInt128(.signed);
try testAtomicRmwInt128(.unsigned);
@@ -359,7 +359,7 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
}
test "atomics with different types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -409,7 +409,6 @@ fn testAtomicsWithPackedStruct(comptime T: type, a: T, b: T) !void {
}
test "return @atomicStore, using it as a void value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/basic.zig
@@ -39,7 +39,6 @@ test "truncate to non-power-of-two integers" {
}
test "truncate to non-power-of-two integers from 128-bit" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -422,7 +421,6 @@ fn copy(src: *const u64, dst: *u64) void {
}
test "call result of if else expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -448,7 +446,6 @@ fn hereIsAnOpaqueType(ptr: *OpaqueA) *OpaqueA {
}
test "take address of parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -536,7 +533,6 @@ fn nine() u8 {
}
test "struct inside function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testStructInFn();
@@ -588,7 +584,6 @@ test "global variable assignment with optional unwrapping with var initialized t
var global_foo: *i32 = undefined;
test "peer result location with typed parent, runtime condition, comptime prongs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -719,7 +714,6 @@ test "global constant is loaded with a runtime-known index" {
}
test "multiline string literal is null terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const s1 =
@@ -732,7 +726,6 @@ test "multiline string literal is null terminated" {
}
test "string escapes" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -764,7 +757,6 @@ fn ptrEql(a: *const []const u8, b: *const []const u8) bool {
}
test "string concatenation" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -787,7 +779,6 @@ test "string concatenation" {
}
test "result location is optional inside error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -803,7 +794,6 @@ fn maybe(x: bool) anyerror!?u32 {
}
test "auto created variables have correct alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -821,7 +811,6 @@ test "auto created variables have correct alignment" {
test "extern variable with non-pointer opaque type" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
@@ -866,7 +855,6 @@ test "if expression type coercion" {
}
test "discarding the result of various expressions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -908,7 +896,6 @@ test "labeled block implicitly ends in a break" {
}
test "catch in block has correct result location" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -964,7 +951,6 @@ test "vector initialized with array init syntax has proper type" {
}
test "weird array and tuple initializations" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1010,7 +996,6 @@ test "generic function uses return type of other generic function" {
// https://github.com/ziglang/zig/issues/12208
return error.SkipZigTest;
}
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const S = struct {
fn call(
@@ -1128,7 +1113,6 @@ test "returning an opaque type from a function" {
}
test "orelse coercion as function argument" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Loc = struct { start: i32 = -1 };
@@ -1378,7 +1362,6 @@ test "copy array of self-referential struct" {
test "break out of block based on comptime known values" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1412,8 +1395,8 @@ test "break out of block based on comptime known values" {
}
test "allocation and looping over 3-byte integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/bit_shifting.zig
@@ -112,7 +112,7 @@ test "comptime shift safety check" {
}
test "Saturating Shift Left where lhs is of a computed type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -161,6 +161,7 @@ comptime {
}
test "Saturating Shift Left" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/bitcast.zig
@@ -20,7 +20,6 @@ test "@bitCast iX -> uX (32, 64)" {
}
test "@bitCast iX -> uX (8, 16, 128)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -35,8 +34,8 @@ test "@bitCast iX -> uX (8, 16, 128)" {
}
test "@bitCast iX -> uX exotic integers" {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -80,8 +79,8 @@ fn conv_uN(comptime N: usize, x: std.meta.Int(.unsigned, N)) std.meta.Int(.signe
}
test "bitcast uX to bytes" {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -296,9 +295,9 @@ test "triple level result location with bitcast sandwich passed as tuple element
}
test "@bitCast packed struct of floats" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -334,9 +333,9 @@ test "@bitCast packed struct of floats" {
}
test "comptime @bitCast packed struct to int and back" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -379,7 +378,6 @@ test "comptime bitcast with fields following f80" {
}
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -393,7 +391,7 @@ test "comptime bitcast with fields following f80" {
}
test "bitcast vector to integer and back" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -420,7 +418,6 @@ fn bitCastWrapper128(x: f128) u128 {
return @as(u128, @bitCast(x));
}
test "bitcast nan float does not modify signaling bit" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -473,7 +470,7 @@ test "bitcast nan float does not modify signaling bit" {
}
test "@bitCast of packed struct of bools all true" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -494,7 +491,7 @@ test "@bitCast of packed struct of bools all true" {
}
test "@bitCast of packed struct of bools all false" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -514,7 +511,7 @@ test "@bitCast of packed struct of bools all false" {
}
test "@bitCast of packed struct containing pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -544,7 +541,7 @@ test "@bitCast of packed struct containing pointer" {
}
test "@bitCast of extern struct containing pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
test/behavior/bitreverse.zig
@@ -8,8 +8,8 @@ test "@bitReverse large exotic integer" {
}
test "@bitReverse" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -121,9 +121,9 @@ fn vector8() !void {
}
test "bitReverse vectors u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -141,9 +141,9 @@ fn vector16() !void {
}
test "bitReverse vectors u16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -161,9 +161,9 @@ fn vector24() !void {
}
test "bitReverse vectors u24" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/builtin_functions_returning_void_or_noreturn.zig
@@ -7,7 +7,6 @@ var x: u8 = 1;
// This excludes builtin functions that return void or noreturn that cannot be tested.
test {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/byteswap.zig
@@ -3,40 +3,8 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@byteSwap integers" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- if (builtin.zig_backend == .stage2_wasm) {
- // TODO: Remove when self-hosted wasm supports more types for byteswap
- const ByteSwapIntTest = struct {
- fn run() !void {
- try t(u8, 0x12, 0x12);
- try t(u16, 0x1234, 0x3412);
- try t(u24, 0x123456, 0x563412);
- try t(i24, @as(i24, @bitCast(@as(u24, 0xf23456))), 0x5634f2);
- try t(i24, 0x1234f6, @as(i24, @bitCast(@as(u24, 0xf63412))));
- try t(u32, 0x12345678, 0x78563412);
- try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
- try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
- try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
-
- try t(u0, @as(u0, 0), 0);
- try t(i8, @as(i8, -50), -50);
- try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
- try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
- try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
- try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
- }
- fn t(comptime I: type, input: I, expected_output: I) !void {
- try std.testing.expect(expected_output == @byteSwap(input));
- }
- };
- try comptime ByteSwapIntTest.run();
- try ByteSwapIntTest.run();
- return;
- }
-
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -51,23 +19,44 @@ test "@byteSwap integers" {
try t(u32, 0x12345678, 0x78563412);
try t(i32, @as(i32, @bitCast(@as(u32, 0xf2345678))), 0x785634f2);
try t(i32, 0x123456f8, @as(i32, @bitCast(@as(u32, 0xf8563412))));
- try t(u40, 0x123456789a, 0x9a78563412);
- try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
- try t(u56, 0x123456789abcde, 0xdebc9a78563412);
try t(u64, 0x123456789abcdef1, 0xf1debc9a78563412);
- try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
- try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412);
- try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
try t(u0, @as(u0, 0), 0);
try t(i8, @as(i8, -50), -50);
try t(i16, @as(i16, @bitCast(@as(u16, 0x1234))), @as(i16, @bitCast(@as(u16, 0x3412))));
try t(i24, @as(i24, @bitCast(@as(u24, 0x123456))), @as(i24, @bitCast(@as(u24, 0x563412))));
try t(i32, @as(i32, @bitCast(@as(u32, 0x12345678))), @as(i32, @bitCast(@as(u32, 0x78563412))));
+ try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
+ }
+ fn t(comptime I: type, input: I, expected_output: I) !void {
+ try std.testing.expect(expected_output == @byteSwap(input));
+ }
+ };
+ try comptime ByteSwapIntTest.run();
+ try ByteSwapIntTest.run();
+}
+
+test "@byteSwap exotic integers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+ const ByteSwapIntTest = struct {
+ fn run() !void {
+ try t(u0, 0, 0);
+ try t(u40, 0x123456789a, 0x9a78563412);
+ try t(i48, 0x123456789abc, @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
+ try t(u56, 0x123456789abcde, 0xdebc9a78563412);
+ try t(u88, 0x123456789abcdef1112131, 0x312111f1debc9a78563412);
+ try t(u96, 0x123456789abcdef111213141, 0x41312111f1debc9a78563412);
+ try t(u128, 0x123456789abcdef11121314151617181, 0x8171615141312111f1debc9a78563412);
+
try t(u40, @as(i40, @bitCast(@as(u40, 0x123456789a))), @as(u40, 0x9a78563412));
try t(i48, @as(i48, @bitCast(@as(u48, 0x123456789abc))), @as(i48, @bitCast(@as(u48, 0xbc9a78563412))));
try t(i56, @as(i56, @bitCast(@as(u56, 0x123456789abcde))), @as(i56, @bitCast(@as(u56, 0xdebc9a78563412))));
- try t(i64, @as(i64, @bitCast(@as(u64, 0x123456789abcdef1))), @as(i64, @bitCast(@as(u64, 0xf1debc9a78563412))));
try t(i88, @as(i88, @bitCast(@as(u88, 0x123456789abcdef1112131))), @as(i88, @bitCast(@as(u88, 0x312111f1debc9a78563412))));
try t(i96, @as(i96, @bitCast(@as(u96, 0x123456789abcdef111213141))), @as(i96, @bitCast(@as(u96, 0x41312111f1debc9a78563412))));
try t(
@@ -93,9 +82,9 @@ fn vector8() !void {
}
test "@byteSwap vectors u8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -113,9 +102,9 @@ fn vector16() !void {
}
test "@byteSwap vectors u16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -133,9 +122,9 @@ fn vector24() !void {
}
test "@byteSwap vectors u24" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/call.zig
@@ -20,8 +20,8 @@ test "super basic invocations" {
}
test "basic invocations" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -60,7 +60,6 @@ test "basic invocations" {
}
test "tuple parameters" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -95,7 +94,6 @@ test "tuple parameters" {
test "result location of function call argument through runtime condition and struct init" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = enum { a, b };
@@ -115,6 +113,7 @@ test "result location of function call argument through runtime condition and st
}
test "function call with 40 arguments" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -270,7 +269,7 @@ test "arguments to comptime parameters generated in comptime blocks" {
}
test "forced tail call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -305,7 +304,7 @@ test "forced tail call" {
}
test "inline call preserves tail call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -342,7 +341,6 @@ test "inline call preserves tail call" {
}
test "inline call doesn't re-evaluate non generic struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -409,7 +407,6 @@ test "recursive inline call with comptime known argument" {
}
test "inline while with @call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@@ -439,7 +436,6 @@ test "method call as parameter type" {
}
test "non-anytype generic parameters provide result type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -468,7 +464,6 @@ test "non-anytype generic parameters provide result type" {
}
test "argument to generic function has correct result type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -521,7 +516,6 @@ test "call function in comptime field" {
test "call function pointer in comptime field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -573,7 +567,6 @@ test "value returned from comptime function is comptime known" {
}
test "registers get overwritten when ignoring return" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.cpu.arch != .x86_64 or builtin.os.tag != .linux) return error.SkipZigTest;
@@ -619,7 +612,6 @@ test "call with union with zero sized field is not memorized incorrectly" {
}
test "function call with cast to anyopaque pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -637,6 +629,7 @@ test "function call with cast to anyopaque pointer" {
}
test "arguments pointed to on stack into tailcall" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -708,7 +701,7 @@ test "arguments pointed to on stack into tailcall" {
}
test "tail call function pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
test/behavior/cast.zig
@@ -21,7 +21,6 @@ test "integer literal to pointer cast" {
}
test "peer type resolution: ?T and T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -100,7 +99,6 @@ test "comptime_int @floatFromInt" {
}
test "@floatFromInt" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -121,7 +119,6 @@ test "@floatFromInt" {
}
test "@floatFromInt(f80)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -157,7 +154,6 @@ test "@floatFromInt(f80)" {
}
test "@intFromFloat" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -181,7 +177,6 @@ fn expectIntFromFloat(comptime F: type, f: F, comptime I: type, i: I) !void {
}
test "implicitly cast indirect pointer to maybe-indirect pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -241,7 +236,6 @@ test "@floatCast comptime_int and comptime_float" {
}
test "coerce undefined to optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -262,7 +256,6 @@ fn MakeType(comptime T: type) type {
}
test "implicit cast from *[N]T to [*c]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -299,7 +292,6 @@ test "@intCast to u0 and use the result" {
}
test "peer result null and comptime_int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -324,7 +316,6 @@ test "peer result null and comptime_int" {
}
test "*const ?[*]const T to [*c]const [*c]const T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -338,7 +329,6 @@ test "*const ?[*]const T to [*c]const [*c]const T" {
}
test "array coercion to undefined at runtime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -368,7 +358,6 @@ fn implicitIntLitToOptional() void {
}
test "return u8 coercing into ?u32 return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -390,7 +379,6 @@ test "cast from ?[*]T to ??[*]T" {
}
test "peer type unsigned int to signed" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var w: u31 = 5;
@@ -403,7 +391,6 @@ test "peer type unsigned int to signed" {
}
test "expected [*c]const u8, found [*:0]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -415,7 +402,6 @@ test "expected [*c]const u8, found [*:0]const u8" {
}
test "explicit cast from integer to error type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -431,7 +417,6 @@ fn testCastIntToErr(err: anyerror) !void {
}
test "peer resolve array and const slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -447,7 +432,6 @@ fn testPeerResolveArrayConstSlice(b: bool) !void {
}
test "implicitly cast from T to anyerror!?T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -473,7 +457,6 @@ fn castToOptionalTypeError(z: i32) !void {
}
test "implicitly cast from [0]T to anyerror![]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testCastZeroArrayToErrSliceMut();
@@ -489,7 +472,6 @@ fn gimmeErrOrSlice() anyerror![]u8 {
}
test "peer type resolution: [0]u8, []const u8, and anyerror![]u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -522,7 +504,6 @@ fn peerTypeEmptyArrayAndSliceAndError(a: bool, slice: []u8) anyerror![]u8 {
}
test "implicit cast from *const [N]T to []const T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -548,7 +529,6 @@ fn testCastConstArrayRefToConstSlice() !void {
}
test "peer type resolution: error and [N]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -573,7 +553,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 {
}
test "single-item pointer of array to slice to unknown length pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -603,7 +582,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void {
}
test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -613,8 +591,8 @@ test "cast *[1][*]const u8 to [*]const ?[*]const u8" {
}
test "@intCast on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -651,7 +629,6 @@ test "@intCast on vector" {
}
test "@floatCast cast down" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -670,7 +647,6 @@ test "@floatCast cast down" {
}
test "peer type resolution: unreachable, error set, unreachable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Error = error{
@@ -704,7 +680,6 @@ test "peer cast: error set any anyerror" {
}
test "peer type resolution: error set supersets" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -735,7 +710,6 @@ test "peer type resolution: error set supersets" {
test "peer type resolution: disjoint error sets" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -765,7 +739,6 @@ test "peer type resolution: disjoint error sets" {
test "peer type resolution: error union and error set" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -799,7 +772,6 @@ test "peer type resolution: error union and error set" {
test "peer type resolution: error union after non-error" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -833,7 +805,6 @@ test "peer type resolution: error union after non-error" {
test "peer cast *[0]T to E![]const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -849,7 +820,6 @@ test "peer cast *[0]T to E![]const T" {
test "peer cast *[0]T to []const T" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -872,7 +842,6 @@ test "peer cast *[N]T to [*]T" {
}
test "peer resolution of string literals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -895,7 +864,6 @@ test "peer resolution of string literals" {
}
test "peer cast [:x]T to []T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -912,7 +880,6 @@ test "peer cast [:x]T to []T" {
}
test "peer cast [N:x]T to [N]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -929,7 +896,6 @@ test "peer cast [N:x]T to [N]T" {
}
test "peer cast *[N:x]T to *[N]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -945,7 +911,6 @@ test "peer cast *[N:x]T to *[N]T" {
}
test "peer cast [*:x]T to [*]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -966,7 +931,6 @@ test "peer cast [*:x]T to [*]T" {
}
test "peer cast [:x]T to [*:x]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -988,7 +952,6 @@ test "peer cast [:x]T to [*:x]T" {
}
test "peer type resolution implicit cast to return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1009,7 +972,6 @@ test "peer type resolution implicit cast to return type" {
}
test "peer type resolution implicit cast to variable type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1035,7 +997,6 @@ test "variable initialization uses result locations properly with regards to the
}
test "cast between C pointer with different but compatible types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1053,7 +1014,6 @@ test "cast between C pointer with different but compatible types" {
}
test "peer type resolve string lit with sentinel-terminated mutable slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1104,7 +1064,6 @@ test "comptime float casts" {
}
test "pointer reinterpret const float to int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// The hex representation is 0x3fe3333333333303.
@@ -1119,7 +1078,6 @@ test "pointer reinterpret const float to int" {
}
test "implicit cast from [*]T to ?*anyopaque" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1184,7 +1142,6 @@ test "cast function with an opaque parameter" {
}
test "implicit ptr to *anyopaque" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1199,7 +1156,6 @@ test "implicit ptr to *anyopaque" {
}
test "return null from fn () anyerror!?&T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1216,7 +1172,6 @@ fn returnNullLitFromOptionalTypeErrorRef() anyerror!?*A {
}
test "peer type resolution: [0]u8 and []const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1237,7 +1192,6 @@ fn peerTypeEmptyArrayAndSlice(a: bool, slice: []const u8) []const u8 {
}
test "implicitly cast from [N]T to ?[]const T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1251,7 +1205,6 @@ fn castToOptionalSlice() ?[]const u8 {
}
test "cast u128 to f128 and back" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1274,7 +1227,6 @@ fn cast128Float(x: u128) f128 {
}
test "implicit cast from *[N]T to ?[*]T" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1291,7 +1243,6 @@ test "implicit cast from *[N]T to ?[*]T" {
}
test "implicit cast from *T to ?*anyopaque" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1306,7 +1257,6 @@ fn incrementVoidPtrValue(value: ?*anyopaque) void {
}
test "implicit cast *[0]T to E![]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x = @as(anyerror![]const u8, &[0]u8{});
@@ -1330,7 +1280,6 @@ test "cast from array reference to fn: runtime fn ptr" {
}
test "*const [N]null u8 to ?[]const u8" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1367,7 +1316,6 @@ test "cast between [*c]T and ?[*:0]T on fn parameter" {
var global_struct: struct { f0: usize } = undefined;
test "assignment to optional pointer result loc" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1386,7 +1334,6 @@ test "cast between *[N]void and []void" {
}
test "peer resolve arrays of different size to const slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1400,7 +1347,6 @@ fn boolToStr(b: bool) []const u8 {
}
test "cast f16 to wider types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1421,7 +1367,6 @@ test "cast f16 to wider types" {
}
test "cast f128 to narrower types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1441,7 +1386,6 @@ test "cast f128 to narrower types" {
}
test "peer type resolution: unreachable, null, slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1460,7 +1404,6 @@ test "peer type resolution: unreachable, null, slice" {
}
test "cast i8 fn call peers to i32 result" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1482,7 +1425,6 @@ test "cast i8 fn call peers to i32 result" {
}
test "cast compatible optional types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1494,7 +1436,6 @@ test "cast compatible optional types" {
}
test "coerce undefined single-item pointer of array to error union of slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = @as([*]u8, undefined)[0..0];
@@ -1513,7 +1454,6 @@ test "pointer to empty struct literal to mutable slice" {
}
test "coerce between pointers of compatible differently-named floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows and !builtin.link_libc) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1548,7 +1488,6 @@ test "peer type resolution of const and non-const pointer to array" {
}
test "intFromFloat to zero-bit int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1573,8 +1512,6 @@ test "cast typed undefined to int" {
}
// test "implicit cast from [:0]T to [*c]T" {
-// if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
// var a: [:0]const u8 = "foo";
// _ = &a;
// const b: [*c]const u8 = a;
@@ -1584,7 +1521,6 @@ test "cast typed undefined to int" {
// }
test "bitcast packed struct with u0" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = packed struct(u2) { a: u0, b: u2 };
@@ -1691,7 +1627,6 @@ test "coercion from single-item pointer to @as to slice" {
}
test "peer type resolution: const sentinel slice and mutable non-sentinel slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1721,7 +1656,6 @@ test "peer type resolution: const sentinel slice and mutable non-sentinel slice"
}
test "peer type resolution: float and comptime-known fixed-width integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1743,7 +1677,7 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
}
test "peer type resolution: same array type with sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1766,7 +1700,6 @@ test "peer type resolution: same array type with sentinel" {
}
test "peer type resolution: array with sentinel and array without sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1789,7 +1722,7 @@ test "peer type resolution: array with sentinel and array without sentinel" {
}
test "peer type resolution: array and vector with same child type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1813,7 +1746,7 @@ test "peer type resolution: array and vector with same child type" {
}
test "peer type resolution: array with smaller child type and vector with larger child type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1837,7 +1770,7 @@ test "peer type resolution: array with smaller child type and vector with larger
}
test "peer type resolution: error union and optional of same type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1861,7 +1794,6 @@ test "peer type resolution: error union and optional of same type" {
}
test "peer type resolution: C pointer and @TypeOf(null)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1884,7 +1816,7 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
}
test "peer type resolution: three-way resolution combines error set and optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1927,7 +1859,7 @@ test "peer type resolution: three-way resolution combines error set and optional
}
test "peer type resolution: vector and optional vector" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1952,7 +1884,6 @@ test "peer type resolution: vector and optional vector" {
}
test "peer type resolution: optional fixed-width int and comptime_int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1974,7 +1905,7 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
}
test "peer type resolution: array and tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1998,7 +1929,7 @@ test "peer type resolution: array and tuple" {
}
test "peer type resolution: vector and tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2022,7 +1953,7 @@ test "peer type resolution: vector and tuple" {
}
test "peer type resolution: vector and array and tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2066,7 +1997,6 @@ test "peer type resolution: vector and array and tuple" {
}
test "peer type resolution: empty tuple pointer and slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2088,7 +2018,6 @@ test "peer type resolution: empty tuple pointer and slice" {
}
test "peer type resolution: tuple pointer and slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2110,7 +2039,6 @@ test "peer type resolution: tuple pointer and slice" {
}
test "peer type resolution: tuple pointer and optional slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Miscompilation on Intel's OpenCL CPU runtime.
@@ -2133,7 +2061,6 @@ test "peer type resolution: tuple pointer and optional slice" {
}
test "peer type resolution: many compatible pointers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2200,7 +2127,6 @@ test "peer type resolution: many compatible pointers" {
}
test "peer type resolution: tuples with comptime fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -2232,7 +2158,6 @@ test "peer type resolution: tuples with comptime fields" {
}
test "peer type resolution: C pointer and many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2256,7 +2181,6 @@ test "peer type resolution: C pointer and many pointer" {
}
test "peer type resolution: pointer attributes are combined correctly" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2338,7 +2262,7 @@ test "peer type resolution: pointer attributes are combined correctly" {
}
test "peer type resolution: arrays of compatible types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2356,7 +2280,6 @@ test "peer type resolution: arrays of compatible types" {
}
test "cast builtins can wrap result in optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2394,7 +2317,6 @@ test "cast builtins can wrap result in optional" {
}
test "cast builtins can wrap result in error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2432,7 +2354,6 @@ test "cast builtins can wrap result in error union" {
}
test "cast builtins can wrap result in error union and optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2471,8 +2392,8 @@ test "cast builtins can wrap result in error union and optional" {
}
test "@floatCast on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2512,8 +2433,8 @@ test "@floatCast on vector" {
}
test "@ptrFromInt on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2537,8 +2458,8 @@ test "@ptrFromInt on vector" {
}
test "@intFromPtr on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2562,8 +2483,8 @@ test "@intFromPtr on vector" {
}
test "@floatFromInt on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2582,8 +2503,8 @@ test "@floatFromInt on vector" {
}
test "@intFromFloat on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2602,8 +2523,8 @@ test "@intFromFloat on vector" {
}
test "@intFromBool on vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2639,7 +2560,6 @@ test "15-bit int to float" {
}
test "@as does not corrupt values with incompatible representations" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2654,7 +2574,6 @@ test "@as does not corrupt values with incompatible representations" {
}
test "result information is preserved through many nested structures" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2679,7 +2598,7 @@ test "result information is preserved through many nested structures" {
}
test "@intCast vector of signed integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2703,7 +2622,6 @@ test "result type is preserved into comptime block" {
}
test "bitcast vector" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const u8x32 = @Vector(32, u8);
@@ -2766,6 +2684,7 @@ test "@intFromFloat boundary cases" {
}
test "@intFromFloat vector boundary cases" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
test/behavior/cast_int.zig
@@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const maxInt = std.math.maxInt;
test "@intCast i32 to u7" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -19,7 +18,6 @@ test "@intCast i32 to u7" {
}
test "coerce i8 to i32 and @intCast back" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -36,6 +34,7 @@ test "coerce i8 to i32 and @intCast back" {
test "coerce non byte-sized integers accross 32bits boundary" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
{
var v: u21 = 6417;
_ = &v;
@@ -164,8 +163,9 @@ const Piece = packed struct {
}
};
+// Originally reported at https://github.com/ziglang/zig/issues/14200
test "load non byte-sized optional value" {
- // Originally reported at https://github.com/ziglang/zig/issues/14200
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -181,6 +181,7 @@ test "load non byte-sized optional value" {
}
test "load non byte-sized value in struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.cpu.arch.endian() != .little) return error.SkipZigTest; // packed struct TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
test/behavior/comptime_memory.zig
@@ -66,7 +66,6 @@ fn bigToNativeEndian(comptime T: type, v: T) T {
return if (endian == .big) v else @byteSwap(v);
}
test "type pun endianness" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -360,7 +359,6 @@ test "offset field ptr by enclosing array element size" {
}
test "accessing reinterpreted memory of parent object" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = extern struct {
test/behavior/const_slice_child.zig
@@ -7,7 +7,6 @@ const expect = testing.expect;
var argv: [*]const [*]const u8 = undefined;
test "const slice child" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/decl_literals.zig
@@ -33,9 +33,9 @@ test "decl literal with pointer" {
}
test "call decl literal with optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -74,6 +74,7 @@ test "call decl literal" {
}
test "call decl literal with error union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
const S = struct {
test/behavior/defer.zig
@@ -32,7 +32,6 @@ test "defer and labeled break" {
}
test "errdefer does not apply to fn inside fn" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (testNestedFnErrDefer()) |_| @panic("expected error") else |e| try expect(e == error.Bad);
@@ -51,7 +50,6 @@ fn testNestedFnErrDefer() anyerror!void {
test "return variable while defer expression in scope to modify it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -91,7 +89,6 @@ fn runSomeErrorDefers(x: bool) !bool {
}
test "mixing normal and error defers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -110,7 +107,7 @@ test "mixing normal and error defers" {
}
test "errdefer with payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -132,8 +129,8 @@ test "errdefer with payload" {
}
test "reference to errdefer payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -157,7 +154,6 @@ test "reference to errdefer payload" {
}
test "simple else prong doesn't emit an error for unreachable else prong" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/enum.zig
@@ -25,7 +25,6 @@ fn testEnumFromIntEval(x: i32) !void {
const EnumFromIntNumber = enum { Zero, One, Two, Three, Four };
test "int to enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumFromIntEval(3);
@@ -608,7 +607,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void {
}
test "enum with specified tag values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumWithSpecifiedTagValues(MultipleChoice.C);
@@ -616,7 +614,6 @@ test "enum with specified tag values" {
}
test "non-exhaustive enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -680,7 +677,6 @@ test "empty non-exhaustive enum" {
}
test "single field non-exhaustive enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -744,7 +740,6 @@ test "cast integer literal to enum" {
}
test "enum with specified and unspecified tag values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D);
@@ -904,8 +899,8 @@ test "enum value allocation" {
}
test "enum literal casting to tagged union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Arch = union(enum) {
@@ -941,8 +936,8 @@ test "enum literal casting to error union with payload enum" {
}
test "constant enum initialization with differing sizes" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -985,8 +980,8 @@ fn test3_2(f: Test3Foo) !void {
}
test "@tagName" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1002,8 +997,8 @@ fn testEnumTagNameBare(n: anytype) []const u8 {
const BareNumber = enum { One, Two, Three };
test "@tagName non-exhaustive enum" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1014,8 +1009,8 @@ test "@tagName non-exhaustive enum" {
const NonExhaustive = enum(u8) { A, B, _ };
test "@tagName is null-terminated" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1030,8 +1025,8 @@ test "@tagName is null-terminated" {
}
test "tag name with assigned enum values" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1046,7 +1041,6 @@ test "tag name with assigned enum values" {
}
test "@tagName on enum literals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1055,8 +1049,8 @@ test "@tagName on enum literals" {
}
test "tag name with signed enum values" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1073,8 +1067,8 @@ test "tag name with signed enum values" {
}
test "@tagName in callconv(.c) function" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1091,7 +1085,6 @@ fn testEnumTagNameCallconvC() callconv(.c) [*:0]const u8 {
test "enum literal casting to optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var bar: ?Bar = undefined;
@@ -1117,8 +1110,8 @@ const bit_field_1 = BitFieldOfEnums{
};
test "bit field access with enum fields" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1158,8 +1151,8 @@ test "enum literal in array literal" {
}
test "tag name functions are unique" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1179,7 +1172,6 @@ test "tag name functions are unique" {
}
test "size of enum with only one tag which has explicit integer tag type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E = enum(u8) { nope = 10 };
test/behavior/error.zig
@@ -145,11 +145,14 @@ test "implicit cast to optional to error union to return result loc" {
}
test "fn returning empty error set can be passed as fn returning any error" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
entry();
comptime entry();
}
test "fn returning empty error set can be passed as fn returning any error - pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
entryPtr();
@@ -401,8 +404,8 @@ fn intLiteral(str: []const u8) !?i64 {
}
test "nested error union function call in optional unwrap" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -448,7 +451,6 @@ test "nested error union function call in optional unwrap" {
}
test "return function call to error set from error union function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -465,7 +467,6 @@ test "return function call to error set from error union function" {
}
test "optional error set is the same size as error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -481,7 +482,7 @@ test "optional error set is the same size as error set" {
}
test "nested catch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -530,7 +531,7 @@ test "function pointer with return type that is error union with payload which i
}
test "return result loc as peer result loc in inferred error set function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -562,7 +563,6 @@ test "return result loc as peer result loc in inferred error set function" {
test "error payload type is correctly resolved" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const MyIntWrapper = struct {
@@ -590,8 +590,8 @@ test "error union comptime caching" {
}
test "@errorName" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -605,8 +605,8 @@ fn gimmeItBroke() anyerror {
}
test "@errorName sentinel length matches slice length" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -700,8 +700,8 @@ test "coerce error set to the current inferred error set" {
}
test "error union payload is properly aligned" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -719,7 +719,6 @@ test "error union payload is properly aligned" {
}
test "ret_ptr doesn't cause own inferred error set to be resolved" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -760,7 +759,7 @@ test "simple else prong allowed even when all errors handled" {
}
test "pointer to error union payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -794,7 +793,6 @@ const NoReturn = struct {
};
test "error union of noreturn used with if" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -809,7 +807,6 @@ test "error union of noreturn used with if" {
}
test "error union of noreturn used with try" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -821,7 +818,6 @@ test "error union of noreturn used with try" {
}
test "error union of noreturn used with catch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -833,7 +829,6 @@ test "error union of noreturn used with catch" {
}
test "alignment of wrapping an error union payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -852,6 +847,7 @@ test "alignment of wrapping an error union payload" {
}
test "compare error union and error set" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: anyerror = error.Foo;
@@ -887,7 +883,7 @@ test "catch within a function that calls no errorable functions" {
}
test "error from comptime string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -913,7 +909,6 @@ test "field access of anyerror results in smaller error set" {
}
test "optional error union return type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@@ -928,7 +923,6 @@ test "optional error union return type" {
test "optional error set return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const E = error{ A, B };
const S = struct {
@@ -952,8 +946,8 @@ test "optional error set function parameter" {
}
test "returning an error union containing a type with no runtime bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ZeroByteType = struct {
@@ -969,7 +963,7 @@ test "returning an error union containing a type with no runtime bits" {
}
test "try used in recursive function with inferred error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1010,7 +1004,6 @@ test "generic inline function returns inferred error set" {
}
test "function called at runtime is properly analyzed for inferred error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1045,6 +1038,8 @@ test "errorCast to adhoc inferred error set" {
}
test "@errorCast from error set to error union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
fn doTheTest(set: error{ A, B }) error{A}!i32 {
return @errorCast(set);
@@ -1055,6 +1050,8 @@ test "@errorCast from error set to error union" {
}
test "@errorCast from error union to error union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
fn doTheTest(set: error{ A, B }!i32) error{A}!i32 {
return @errorCast(set);
@@ -1065,8 +1062,8 @@ test "@errorCast from error union to error union" {
}
test "result location initialization of error union with OPV payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
test/behavior/eval.zig
@@ -18,7 +18,6 @@ fn unwrapAndAddOne(blah: ?i32) i32 {
}
const should_be_1235 = unwrapAndAddOne(1234);
test "static add one" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -71,7 +70,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 {
}
test "constant expressions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var array: [array_size]u8 = undefined;
@@ -93,7 +91,6 @@ fn letsTryToCompareBools(a: bool, b: bool) bool {
return max(bool, a, b);
}
test "inlined block and runtime block phi" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(letsTryToCompareBools(true, true));
@@ -140,7 +137,6 @@ test "pointer to type" {
}
test "a type constructed in a global expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -236,7 +232,6 @@ const vertices = [_]Vertex{
};
test "statically initialized list" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(static_point_list[0].x == 1);
@@ -342,7 +337,6 @@ fn doesAlotT(comptime T: type, value: usize) T {
}
test "@setEvalBranchQuota at same scope as generic function call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(doesAlotT(u32, 2) == 2);
@@ -394,7 +388,6 @@ test "return 0 from function that has u0 return type" {
}
test "statically initialized struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
st_init_str_foo.x += 1;
@@ -444,7 +437,6 @@ fn copyWithPartialInline(s: []u32, b: []u8) void {
test "binary math operator in partially inlined function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -462,7 +454,6 @@ test "binary math operator in partially inlined function" {
}
test "comptime shl" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -491,6 +482,7 @@ test "comptime bitwise operators" {
}
test "comptime shlWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -503,7 +495,6 @@ test "comptime shlWithOverflow" {
}
test "const ptr to variable data changes at runtime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -521,7 +512,6 @@ const foo_ref = &foo_contents;
test "runtime 128 bit integer division" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -536,7 +526,6 @@ test "runtime 128 bit integer division" {
}
test "@tagName of @typeInfo" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -545,7 +534,6 @@ test "@tagName of @typeInfo" {
}
test "static eval list init" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -578,7 +566,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio
}
test "ptr to local array argument at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -741,7 +728,6 @@ test "*align(1) u16 is the same as *align(1:0:2) u16" {
test "array concatenation of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -751,7 +737,6 @@ test "array concatenation of function calls" {
test "array multiplication of function calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -769,7 +754,6 @@ fn scalar(x: u32) u32 {
test "array concatenation peer resolves element types - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = [2]u3{ 1, 7 };
@@ -786,7 +770,6 @@ test "array concatenation peer resolves element types - value" {
test "array concatenation peer resolves element types - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -803,7 +786,6 @@ test "array concatenation peer resolves element types - pointer" {
test "array concatenation sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -823,7 +805,6 @@ test "array concatenation sets the sentinel - value" {
}
test "array concatenation sets the sentinel - pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -843,7 +824,6 @@ test "array concatenation sets the sentinel - pointer" {
test "array multiplication sets the sentinel - value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -862,7 +842,6 @@ test "array multiplication sets the sentinel - value" {
test "array multiplication sets the sentinel - pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -889,7 +868,6 @@ test "comptime assign int to optional int" {
test "two comptime calls with array default initialized to undefined" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -976,7 +954,6 @@ test "const local with comptime init through array init" {
}
test "closure capture type of runtime-known parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -992,7 +969,6 @@ test "closure capture type of runtime-known parameter" {
}
test "closure capture type of runtime-known var" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 1234;
@@ -1035,7 +1011,6 @@ test "comptime break passing through runtime condition converted to runtime brea
}
test "comptime break to outer loop passing through runtime condition converted to runtime break" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1088,7 +1063,6 @@ test "comptime break operand passing through runtime condition converted to runt
}
test "comptime break operand passing through runtime switch converted to runtime break" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1108,7 +1082,6 @@ test "comptime break operand passing through runtime switch converted to runtime
}
test "no dependency loop for alignment of self struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1147,7 +1120,6 @@ test "no dependency loop for alignment of self struct" {
}
test "no dependency loop for alignment of self bare union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1186,7 +1158,6 @@ test "no dependency loop for alignment of self bare union" {
}
test "no dependency loop for alignment of self tagged union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1230,7 +1201,6 @@ test "equality of pointers to comptime const" {
}
test "storing an array of type in a field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1260,7 +1230,6 @@ test "storing an array of type in a field" {
}
test "pass pointer to field of comptime-only type as a runtime parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1387,7 +1356,6 @@ test "lazy sizeof union tag size in compare" {
}
test "lazy value is resolved as slice operand" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1568,7 +1536,7 @@ test "x or true is comptime-known true" {
}
test "non-optional and optional array elements concatenated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/export_builtin.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "exporting enum value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
@@ -23,7 +22,6 @@ test "exporting enum value" {
test "exporting with internal linkage" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn foo() callconv(.c) void {}
@@ -36,7 +34,6 @@ test "exporting with internal linkage" {
test "exporting using namespace access" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.cpu.arch.isWasm()) {
// https://github.com/ziglang/zig/issues/4866
@@ -57,7 +54,6 @@ test "exporting using namespace access" {
test "exporting comptime-known value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/field_parent_ptr.zig
@@ -2,6 +2,7 @@ const expect = @import("std").testing.expect;
const builtin = @import("builtin");
test "@fieldParentPtr struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -586,6 +587,7 @@ test "@fieldParentPtr extern struct last zero-bit field" {
}
test "@fieldParentPtr unaligned packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -724,6 +726,7 @@ test "@fieldParentPtr unaligned packed struct" {
}
test "@fieldParentPtr aligned packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1339,6 +1342,7 @@ test "@fieldParentPtr packed struct last zero-bit field" {
}
test "@fieldParentPtr tagged union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1475,6 +1479,7 @@ test "@fieldParentPtr tagged union" {
}
test "@fieldParentPtr untagged union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/floatop.zig
@@ -143,7 +143,6 @@ test "cmp f64" {
}
test "cmp f128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -154,7 +153,6 @@ test "cmp f128" {
}
test "cmp f80/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
@@ -223,6 +221,7 @@ fn testCmp(comptime T: type) !void {
}
test "vector cmp f16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@@ -236,6 +235,7 @@ test "vector cmp f16" {
}
test "vector cmp f32" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -249,6 +249,7 @@ test "vector cmp f32" {
}
test "vector cmp f64" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -261,8 +262,8 @@ test "vector cmp f64" {
}
test "vector cmp f128" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -276,6 +277,7 @@ test "vector cmp f128" {
}
test "vector cmp f80/c_longdouble" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .powerpc64le) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -324,7 +326,6 @@ fn testCmpVector(comptime T: type) !void {
test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -371,7 +372,6 @@ test "negative f128 intFromFloat at compile-time" {
test "@sqrt f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -382,7 +382,6 @@ test "@sqrt f16" {
test "@sqrt f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -394,7 +393,6 @@ test "@sqrt f32/f64" {
test "@sqrt f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -481,9 +479,9 @@ fn testSqrt(comptime T: type) !void {
}
test "@sqrt with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -503,7 +501,6 @@ fn testSqrtWithVectors() !void {
test "@sin f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -514,7 +511,6 @@ test "@sin f16" {
test "@sin f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -527,7 +523,6 @@ test "@sin f32/f64" {
test "@sin f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -554,9 +549,9 @@ fn testSin(comptime T: type) !void {
}
test "@sin with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -576,7 +571,6 @@ fn testSinWithVectors() !void {
test "@cos f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -587,7 +581,6 @@ test "@cos f16" {
test "@cos f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -600,7 +593,6 @@ test "@cos f32/f64" {
test "@cos f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -627,9 +619,9 @@ fn testCos(comptime T: type) !void {
}
test "@cos with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -649,7 +641,6 @@ fn testCosWithVectors() !void {
test "@tan f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -660,7 +651,6 @@ test "@tan f16" {
test "@tan f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -673,7 +663,6 @@ test "@tan f32/f64" {
test "@tan f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -700,9 +689,9 @@ fn testTan(comptime T: type) !void {
}
test "@tan with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -722,7 +711,6 @@ fn testTanWithVectors() !void {
test "@exp f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -733,7 +721,6 @@ test "@exp f16" {
test "@exp f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -746,7 +733,6 @@ test "@exp f32/f64" {
test "@exp f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -777,9 +763,9 @@ fn testExp(comptime T: type) !void {
}
test "@exp with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -799,7 +785,6 @@ fn testExpWithVectors() !void {
test "@exp2 f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -810,7 +795,6 @@ test "@exp2 f16" {
test "@exp2 f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -823,7 +807,6 @@ test "@exp2 f32/f64" {
test "@exp2 f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -849,9 +832,9 @@ fn testExp2(comptime T: type) !void {
}
test "@exp2 with @vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -870,7 +853,6 @@ fn testExp2WithVectors() !void {
}
test "@log f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -881,7 +863,6 @@ test "@log f16" {
}
test "@log f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -894,7 +875,6 @@ test "@log f32/f64" {
}
test "@log f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -921,8 +901,8 @@ fn testLog(comptime T: type) !void {
}
test "@log with @vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -940,7 +920,6 @@ test "@log with @vectors" {
}
test "@log2 f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -951,7 +930,6 @@ test "@log2 f16" {
}
test "@log2 f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -964,7 +942,6 @@ test "@log2 f32/f64" {
}
test "@log2 f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -991,8 +968,8 @@ fn testLog2(comptime T: type) !void {
}
test "@log2 with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1016,7 +993,6 @@ fn testLog2WithVectors() !void {
}
test "@log10 f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1027,7 +1003,6 @@ test "@log10 f16" {
}
test "@log10 f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1040,7 +1015,6 @@ test "@log10 f32/f64" {
}
test "@log10 f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1067,8 +1041,8 @@ fn testLog10(comptime T: type) !void {
}
test "@log10 with vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1089,7 +1063,6 @@ fn testLog10WithVectors() !void {
test "@abs f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1099,7 +1072,6 @@ test "@abs f16" {
test "@abs f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testFabs(f32);
@@ -1110,7 +1082,6 @@ test "@abs f32/f64" {
test "@abs f80/f128/c_longdouble" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1190,7 +1161,7 @@ fn testFabs(comptime T: type) !void {
}
test "@abs with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1210,7 +1181,6 @@ fn testFabsWithVectors() !void {
}
test "@floor f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1220,7 +1190,6 @@ test "@floor f16" {
}
test "@floor f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1232,7 +1201,6 @@ test "@floor f32/f64" {
}
test "@floor f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1284,7 +1252,7 @@ fn testFloor(comptime T: type) !void {
}
test "@floor with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1304,7 +1272,6 @@ fn testFloorWithVectors() !void {
}
test "@ceil f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1314,7 +1281,6 @@ test "@ceil f16" {
}
test "@ceil f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1326,7 +1292,6 @@ test "@ceil f32/f64" {
}
test "@ceil f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1378,7 +1343,7 @@ fn testCeil(comptime T: type) !void {
}
test "@ceil with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1398,7 +1363,6 @@ fn testCeilWithVectors() !void {
}
test "@trunc f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1408,7 +1372,6 @@ test "@trunc f16" {
}
test "@trunc f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1420,7 +1383,6 @@ test "@trunc f32/f64" {
}
test "@trunc f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1472,7 +1434,7 @@ fn testTrunc(comptime T: type) !void {
}
test "@trunc with vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1492,7 +1454,6 @@ fn testTruncWithVectors() !void {
}
test "neg f16" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1508,7 +1469,6 @@ test "neg f16" {
}
test "neg f32/f64" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1520,7 +1480,6 @@ test "neg f32/f64" {
}
test "neg f80/f128/c_longdouble" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1606,7 +1565,6 @@ fn testNeg(comptime T: type) !void {
}
test "eval @setFloatMode at compile-time" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = comptime fnWithFloatMode();
@@ -1629,7 +1587,6 @@ test "f128 at compile time is lossy" {
test "comptime fixed-width float zero divided by zero produces NaN" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1641,7 +1598,6 @@ test "comptime fixed-width float zero divided by zero produces NaN" {
test "comptime fixed-width float non-zero divided by zero produces signed Inf" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1686,21 +1642,18 @@ test "comptime inf >= runtime 1" {
try std.testing.expect(f >= i);
}
test "comptime isNan(nan * 1)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_one = comptime std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
}
test "runtime isNan(nan * 1)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_one = std.math.nan(f64) * 1;
try std.testing.expect(std.math.isNan(nan_times_one));
}
test "comptime isNan(nan * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_zero = comptime std.math.nan(f64) * 0;
@@ -1709,7 +1662,6 @@ test "comptime isNan(nan * 0)" {
try std.testing.expect(std.math.isNan(zero_times_nan));
}
test "runtime isNan(nan * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const nan_times_zero = std.math.nan(f64) * 0;
@@ -1718,7 +1670,6 @@ test "runtime isNan(nan * 0)" {
try std.testing.expect(std.math.isNan(zero_times_nan));
}
test "comptime isNan(inf * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const inf_times_zero = comptime std.math.inf(f64) * 0;
@@ -1727,7 +1678,6 @@ test "comptime isNan(inf * 0)" {
try std.testing.expect(std.math.isNan(zero_times_inf));
}
test "runtime isNan(inf * 0)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const inf_times_zero = std.math.inf(f64) * 0;
test/behavior/fn.zig
@@ -78,7 +78,6 @@ test "return inner function which references comptime variable of outer function
test "discard the result of a function that returns a struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -101,7 +100,6 @@ test "discard the result of a function that returns a struct" {
test "inline function call that calls optional function pointer, return pointer at callsite interacts correctly with callsite return type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -179,7 +177,6 @@ fn fComplexCallconvRet(x: u32) callconv(blk: {
test "function with complex callconv and return type expressions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(fComplexCallconvRet(3).x == 9);
@@ -255,7 +252,6 @@ test "pass by non-copying value as method, at comptime" {
test "implicit cast fn call result to optional in field result" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -283,7 +279,6 @@ test "implicit cast fn call result to optional in field result" {
test "void parameters" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try voidFun(1, void{}, 2, {});
}
@@ -306,7 +301,6 @@ fn acceptsString(foo: []u8) void {
}
test "function pointers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -345,7 +339,6 @@ fn numberLiteralArg(a: anytype) !void {
test "function call with anon list literal" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -365,7 +358,6 @@ test "function call with anon list literal" {
test "function call with anon list literal - 2D" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -426,7 +418,6 @@ test "import passed byref to function in return type" {
test "implicit cast function to function ptr" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -485,7 +476,6 @@ test "method call with optional pointer first param" {
test "using @ptrCast on function pointers" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -524,8 +514,6 @@ test "function returns function returning type" {
}
test "peer type resolution of inferred error set with non-void payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const S = struct {
fn openDataFile(mode: enum { read, write }) !u32 {
return switch (mode) {
@@ -582,7 +570,6 @@ test "pass and return comptime-only types" {
test "pointer to alias behaves same as pointer to function" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
test/behavior/for.zig
@@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const mem = std.mem;
test "continue in for loop" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const array = [_]i32{ 1, 2, 3, 4, 5 };
@@ -67,7 +66,6 @@ test "ignore lval with underscore (for loop)" {
test "basic for loop" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -111,7 +109,6 @@ test "basic for loop" {
test "for with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -132,7 +129,6 @@ test "for with null and T peer types and inferred result location type" {
}
test "2 break statements and an else" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -152,7 +148,6 @@ test "2 break statements and an else" {
}
test "for loop with pointer elem var" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -180,7 +175,6 @@ fn mangleString(s: []u8) void {
}
test "for copies its payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -198,7 +192,6 @@ test "for copies its payload" {
}
test "for on slice with allowzero ptr" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -215,7 +208,6 @@ test "for on slice with allowzero ptr" {
}
test "else continue outer for" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -230,8 +222,6 @@ test "else continue outer for" {
}
test "for loop with else branch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
{
var x = [_]u32{ 1, 2 };
_ = &x;
@@ -312,7 +302,6 @@ test "1-based counter and ptr to array" {
test "slice and two counters, one is offset and one is runtime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const slice: []const u8 = "blah";
@@ -342,7 +331,6 @@ test "slice and two counters, one is offset and one is runtime" {
test "two slices, one captured by-ref" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [10]u8 = undefined;
@@ -362,7 +350,6 @@ test "two slices, one captured by-ref" {
test "raw pointer and slice" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [10]u8 = undefined;
@@ -382,7 +369,6 @@ test "raw pointer and slice" {
test "raw pointer and counter" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [10]u8 = undefined;
@@ -401,7 +387,6 @@ test "raw pointer and counter" {
test "inline for with slice as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const comptime_slice = "hello";
var runtime_i: usize = 3;
@@ -432,7 +417,6 @@ test "inline for with slice as the comptime-known" {
test "inline for with counter as the comptime-known" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var runtime_slice = "hello";
@@ -464,7 +448,6 @@ test "inline for with counter as the comptime-known" {
test "inline for on tuple pointer" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct { u32, u32, u32 };
@@ -480,7 +463,6 @@ test "inline for on tuple pointer" {
test "ref counter that starts at zero" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
for ([_]usize{ 0, 1, 2 }, 0..) |i, j| {
try expectEqual(i, j);
@@ -495,7 +477,6 @@ test "ref counter that starts at zero" {
test "inferred alloc ptr of for loop" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
{
var cond = false;
@@ -516,7 +497,6 @@ test "inferred alloc ptr of for loop" {
}
test "for loop results in a bool" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try std.testing.expect(for ([1]u8{0}) |x| {
test/behavior/generics.zig
@@ -17,7 +17,6 @@ fn checkSize(comptime T: type) usize {
}
test "simple generic fn" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(max(i32, 3, -1) == 3);
@@ -53,7 +52,6 @@ fn sameButWithFloats(a: f64, b: f64) f64 {
test "fn with comptime args" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(gimmeTheBigOne(1234, 5678) == 5678);
@@ -63,7 +61,6 @@ test "fn with comptime args" {
test "anytype params" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(max_i32(12, 34) == 34);
@@ -87,7 +84,6 @@ fn max_f64(a: f64, b: f64) f64 {
}
test "type constructed by comptime function call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -113,7 +109,6 @@ fn SimpleList(comptime L: usize) type {
test "function with return type type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var list: List(i32) = undefined;
@@ -154,7 +149,6 @@ fn aGenericFn(comptime T: type, comptime a: T, b: T) T {
test "generic fn with implicit cast" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -173,7 +167,6 @@ fn getFirstByte(comptime T: type, mem: []const T) u8 {
test "generic fn keeps non-generic parameter types" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -249,7 +242,6 @@ test "function parameter is generic" {
}
test "generic function instantiation turns into comptime call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -300,7 +292,6 @@ test "generic function with void and comptime parameter" {
}
test "anonymous struct return type referencing comptime parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -318,7 +309,6 @@ test "anonymous struct return type referencing comptime parameter" {
test "generic function instantiation non-duplicates" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -339,7 +329,6 @@ test "generic function instantiation non-duplicates" {
test "generic instantiation of tagged union with only one field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.os.tag == .wasi) return error.SkipZigTest;
@@ -439,8 +428,6 @@ test "null sentinel pointer passed as generic argument" {
}
test "generic function passed as comptime argument" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const S = struct {
fn doMath(comptime f: fn (comptime type, i32, i32) error{Overflow}!i32, a: i32, b: i32) !void {
const result = try f(i32, a, b);
@@ -451,7 +438,6 @@ test "generic function passed as comptime argument" {
}
test "return type of generic function is function pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -464,8 +450,6 @@ test "return type of generic function is function pointer" {
}
test "coerced function body has inequal value with its uncoerced body" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
-
const S = struct {
const A = B(i32, c);
fn c() !i32 {
@@ -513,7 +497,6 @@ test "union in struct captures argument" {
test "function argument tuple used as struct field" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -546,8 +529,8 @@ test "comptime callconv(.c) function ptr uses comptime type argument" {
}
test "call generic function with from function called by the generic function" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const GET = struct {
test/behavior/globals.zig
@@ -6,7 +6,6 @@ var pos = [2]f32{ 0.0, 0.0 };
test "store to global array" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(pos[1] == 0.0);
pos = [2]f32{ 0.0, 1.0 };
@@ -15,9 +14,9 @@ test "store to global array" {
var vpos = @Vector(2, f32){ 0.0, 0.0 };
test "store to global vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
try expect(vpos[1] == 0.0);
vpos = @Vector(2, f32){ 0.0, 1.0 };
@@ -26,7 +25,6 @@ test "store to global vector" {
test "slices pointing at the same address as global array." {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -47,7 +45,6 @@ test "slices pointing at the same address as global array." {
test "global loads can affect liveness" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
const ByRef = struct {
@@ -188,6 +185,7 @@ test "function pointer field call on global extern struct, conditional on global
}
test "function pointer field call on global extern struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
test/behavior/if.zig
@@ -116,7 +116,6 @@ test "if prongs cast to expected type instead of peer type resolution" {
test "if peer expressions inferred optional type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -135,7 +134,6 @@ test "if peer expressions inferred optional type" {
test "if-else expression with runtime condition result location is inferred optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const A = struct { b: u64, c: u64 };
@@ -174,6 +172,8 @@ fn returnTrue() bool {
}
test "if value shouldn't be load-elided if used later (structs)" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const Foo = struct { x: i32 };
var a = Foo{ .x = 1 };
@@ -191,6 +191,8 @@ test "if value shouldn't be load-elided if used later (structs)" {
}
test "if value shouldn't be load-elided if used later (optionals)" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
var a: ?i32 = 1;
var b: ?i32 = 1;
test/behavior/import_c_keywords.zig
@@ -27,7 +27,6 @@ extern fn @"break"() Id;
extern fn an_alias_of_some_non_c_keyword_function() Id;
test "import c keywords" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/inline_switch.zig
@@ -3,7 +3,6 @@ const expect = std.testing.expect;
const builtin = @import("builtin");
test "inline scalar prongs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: usize = 0;
@@ -18,7 +17,6 @@ test "inline scalar prongs" {
}
test "inline prong ranges" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: usize = 0;
@@ -33,7 +31,6 @@ test "inline prong ranges" {
const E = enum { a, b, c, d };
test "inline switch enums" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: E = .a;
@@ -46,7 +43,7 @@ test "inline switch enums" {
const U = union(E) { a: void, b: u2, c: u3, d: u4 };
test "inline switch unions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -73,7 +70,6 @@ test "inline switch unions" {
}
test "inline else bool" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a = true;
@@ -85,7 +81,6 @@ test "inline else bool" {
}
test "inline else error" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Err = error{ a, b, c };
@@ -98,7 +93,6 @@ test "inline else error" {
}
test "inline else enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const E2 = enum(u8) { a = 2, b = 3, c = 4, d = 5 };
@@ -111,7 +105,7 @@ test "inline else enum" {
}
test "inline else int with gaps" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -130,7 +124,6 @@ test "inline else int with gaps" {
}
test "inline else int all values" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var a: u2 = 0;
test/behavior/int128.zig
@@ -5,7 +5,6 @@ const minInt = std.math.minInt;
const builtin = @import("builtin");
test "uint128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -25,7 +24,6 @@ test "uint128" {
}
test "undefined 128 bit int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -45,7 +43,6 @@ test "undefined 128 bit int" {
}
test "int128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -68,7 +65,6 @@ test "int128" {
}
test "truncate int128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -92,7 +88,6 @@ test "truncate int128" {
}
test "shift int128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/int_comparison_elision.zig
@@ -15,7 +15,6 @@ test "int comparison elision" {
// TODO: support int types > 128 bits wide in other backends
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/ir_block_deps.zig
@@ -18,7 +18,6 @@ fn getErrInt() anyerror!i32 {
}
test "ir block deps" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/lower_strlit_to_vector.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
test "strlit to vector" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/math.zig
@@ -62,11 +62,11 @@ fn assertFalse(b: bool) !void {
}
test "@clz" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
+
try testClz();
try comptime testClz();
}
@@ -80,7 +80,6 @@ fn testClz() !void {
}
test "@clz big ints" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -100,8 +99,8 @@ fn testOneClz(comptime T: type, x: T) u32 {
}
test "@clz vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -146,7 +145,6 @@ fn expectVectorsEqual(a: anytype, b: anytype) !void {
}
test "@ctz" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -170,7 +168,7 @@ fn testOneCtz(comptime T: type, x: T) u32 {
}
test "@ctz 128-bit integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -188,8 +186,8 @@ fn testCtz128() !void {
}
test "@ctz vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -227,7 +225,6 @@ test "const number literal" {
const ten = 10;
test "float equality" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -433,7 +430,6 @@ test "binary not" {
}
test "binary not big int <= 128 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -467,7 +463,7 @@ test "binary not big int <= 128 bits" {
}
test "division" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -607,7 +603,6 @@ test "large integer division" {
}
test "division half-precision floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -694,7 +689,6 @@ fn testUnsignedNegationWrappingEval(x: u16) !void {
}
test "negation wrapping" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -747,7 +741,6 @@ fn testShrTrunc(x: u16) !void {
}
test "f128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -776,6 +769,7 @@ fn should_not_be_zero(x: f128) !void {
}
test "umax wrapped squaring" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -832,7 +826,6 @@ test "umax wrapped squaring" {
}
test "128-bit multiplication" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -867,7 +860,6 @@ fn testAddWithOverflow(comptime T: type, a: T, b: T, add: T, bit: u1) !void {
}
test "@addWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -886,7 +878,6 @@ test "@addWithOverflow" {
}
test "@addWithOverflow > 64 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -958,7 +949,7 @@ fn testMulWithOverflow(comptime T: type, a: T, b: T, mul: T, bit: u1) !void {
}
test "basic @mulWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -970,7 +961,7 @@ test "basic @mulWithOverflow" {
}
test "extensive @mulWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1010,11 +1001,9 @@ test "extensive @mulWithOverflow" {
}
test "@mulWithOverflow bitsize > 32" {
- if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- // aarch64 fails on a release build of the compiler.
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testMulWithOverflow(u40, 3, 0x55_5555_5555, 0xff_ffff_ffff, 0);
@@ -1041,9 +1030,9 @@ test "@mulWithOverflow bitsize > 32" {
}
test "@mulWithOverflow bitsize 128 bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1068,6 +1057,7 @@ test "@mulWithOverflow bitsize 128 bits" {
}
test "@mulWithOverflow bitsize 256 bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1114,7 +1104,6 @@ fn testSubWithOverflow(comptime T: type, a: T, b: T, sub: T, bit: u1) !void {
}
test "@subWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1131,7 +1120,6 @@ test "@subWithOverflow" {
}
test "@subWithOverflow > 64 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1179,7 +1167,7 @@ fn testShlWithOverflow(comptime T: type, a: T, b: math.Log2Int(T), shl: T, bit:
}
test "@shlWithOverflow" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1198,7 +1186,7 @@ test "@shlWithOverflow" {
}
test "@shlWithOverflow > 64 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1267,7 +1255,6 @@ test "allow signed integer division/remainder when values are comptime-known and
}
test "quad hex float literal parsing accurate" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1425,8 +1412,8 @@ test "comptime float rem int" {
}
test "remainder division" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1465,7 +1452,6 @@ fn remdivOne(comptime T: type, a: T, b: T, c: T) !void {
test "float remainder division using @rem" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1506,8 +1492,8 @@ fn fremOne(comptime T: type, a: T, b: T, c: T, epsilon: T) !void {
}
test "float modulo division using @mod" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -1550,7 +1536,6 @@ fn fmodOne(comptime T: type, a: T, b: T, c: T, epsilon: T) !void {
test "@round f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1560,7 +1545,6 @@ test "@round f16" {
test "@round f32/f64" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1577,7 +1561,6 @@ test "@round f32/f64" {
test "@round f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -1589,7 +1572,6 @@ test "@round f80" {
test "@round f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -1606,9 +1588,9 @@ fn testRound(comptime T: type, x: T) !void {
}
test "vector integer addition" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1630,7 +1612,6 @@ test "vector integer addition" {
test "NaN comparison" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1648,7 +1629,6 @@ test "NaN comparison" {
test "NaN comparison f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1670,9 +1650,9 @@ fn testNanEqNan(comptime F: type) !void {
}
test "vector comparison" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1703,7 +1683,6 @@ test "compare undefined literal with comptime_int" {
test "signed zeros are represented properly" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1735,7 +1714,6 @@ test "signed zeros are represented properly" {
test "absFloat" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testAbsFloat();
@@ -1765,8 +1743,8 @@ test "mod lazy values" {
}
test "@clz works on both vector and scalar inputs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1784,7 +1762,6 @@ test "@clz works on both vector and scalar inputs" {
test "runtime comparison to NaN is comptime-known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1814,7 +1791,6 @@ test "runtime comparison to NaN is comptime-known" {
test "runtime int comparison to inf is comptime-known" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1843,8 +1819,8 @@ test "runtime int comparison to inf is comptime-known" {
}
test "float divide by zero" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1878,8 +1854,8 @@ test "float divide by zero" {
}
test "partially-runtime integer vector division would be illegal if vector elements were reordered" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1907,8 +1883,8 @@ test "partially-runtime integer vector division would be illegal if vector eleme
}
test "float vector division of comptime zero by runtime nan is nan" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1925,8 +1901,8 @@ test "float vector division of comptime zero by runtime nan is nan" {
}
test "float vector multiplication of comptime zero by runtime nan is nan" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1943,7 +1919,6 @@ test "float vector multiplication of comptime zero by runtime nan is nan" {
test "comptime float vector division of zero by nan is nan" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1959,7 +1934,6 @@ test "comptime float vector division of zero by nan is nan" {
test "comptime float vector multiplication of zero by nan is nan" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/maximum_minimum.zig
@@ -7,7 +7,6 @@ const expectEqual = std.testing.expectEqual;
test "@max" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -28,9 +27,9 @@ test "@max" {
}
test "@max on vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -61,7 +60,6 @@ test "@max on vectors" {
}
test "@min" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -83,8 +81,8 @@ test "@min" {
}
test "@min for vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -116,7 +114,6 @@ test "@min for vectors" {
}
test "@min/max for floats" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -167,8 +164,8 @@ test "@min/@max more than two arguments" {
}
test "@min/@max more than two vector arguments" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -181,7 +178,6 @@ test "@min/@max more than two vector arguments" {
}
test "@min/@max notices bounds" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -198,8 +194,8 @@ test "@min/@max notices bounds" {
}
test "@min/@max notices vector bounds" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -217,7 +213,6 @@ test "@min/@max notices vector bounds" {
}
test "@min/@max on comptime_int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -231,7 +226,6 @@ test "@min/@max on comptime_int" {
}
test "@min/@max notices bounds from types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -251,8 +245,8 @@ test "@min/@max notices bounds from types" {
}
test "@min/@max notices bounds from vector types" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -273,7 +267,6 @@ test "@min/@max notices bounds from vector types" {
}
test "@min/@max notices bounds from types when comptime-known value is undef" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -293,8 +286,8 @@ test "@min/@max notices bounds from types when comptime-known value is undef" {
}
test "@min/@max notices bounds from vector types when element of comptime-known vector is undef" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -333,7 +326,6 @@ test "@min/@max of signed and unsigned runtime integers" {
}
test "@min resulting in u0" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -364,8 +356,8 @@ test "@min/@max with runtime signed and unsigned integers of same size" {
}
test "@min/@max with runtime vectors of signed and unsigned integers of same size" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/member_func.zig
@@ -28,7 +28,6 @@ const HasFuncs = struct {
test "standard field calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -72,7 +71,6 @@ test "standard field calls" {
test "@field field calls" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/memcpy.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
const assert = std.debug.assert;
test "memcpy and memset intrinsics" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -26,7 +25,6 @@ fn testMemcpyMemset() !void {
}
test "@memcpy with both operands single-ptr-to-array, one is null-terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -47,7 +45,6 @@ fn testMemcpyBothSinglePtrArrayOneIsNullTerminated() !void {
}
test "@memcpy dest many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -70,7 +67,6 @@ fn testMemcpyDestManyPtr() !void {
}
test "@memcpy C pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -156,6 +152,7 @@ test "@memcpy zero-bit type with aliasing" {
}
test "@memcpy with sentinel" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
test/behavior/memmove.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "memmove and memset intrinsics" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -32,7 +31,6 @@ fn testMemmoveMemset() !void {
}
test "@memmove with both operands single-ptr-to-array, one is null-terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -77,7 +75,6 @@ fn testMemmoveBothSinglePtrArrayOneIsNullTerminated() !void {
}
test "@memmove dest many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/memset.zig
@@ -3,7 +3,6 @@ const builtin = @import("builtin");
const expect = std.testing.expect;
test "@memset on array pointers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -32,7 +31,6 @@ fn testMemsetArray() !void {
}
test "@memset on slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -68,7 +66,6 @@ fn testMemsetSlice() !void {
}
test "memset with bool element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -80,7 +77,6 @@ test "memset with bool element" {
}
test "memset with 1-byte struct element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -94,7 +90,6 @@ test "memset with 1-byte struct element" {
}
test "memset with 1-byte array element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -145,7 +140,6 @@ test "memset with large array element, comptime known" {
}
test "@memset provides result type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -164,7 +158,6 @@ test "@memset provides result type" {
}
test "zero keys with @memset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/muladd.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "@mulAdd" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -32,7 +31,6 @@ fn testMulAdd() !void {
test "@mulAdd f16" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -51,7 +49,6 @@ fn testMulAdd16() !void {
test "@mulAdd f80" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -71,7 +68,6 @@ fn testMulAdd80() !void {
test "@mulAdd f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -103,9 +99,9 @@ fn vector16() !void {
}
test "vector f16" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -128,9 +124,9 @@ fn vector32() !void {
}
test "vector f32" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -153,9 +149,9 @@ fn vector64() !void {
}
test "vector f64" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -177,9 +173,9 @@ fn vector80() !void {
}
test "vector f80" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
@@ -203,9 +199,9 @@ fn vector128() !void {
}
test "vector f128" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArm()) return error.SkipZigTest;
test/behavior/multiple_externs_with_conflicting_types.zig
@@ -11,7 +11,6 @@ comptime {
const builtin = @import("builtin");
test "call extern function defined with conflicting type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/nan.zig
@@ -23,7 +23,6 @@ const snan_f128: f128 = math.snan(f128);
test "nan memory equality" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/null.zig
@@ -29,8 +29,8 @@ test "optional type" {
}
test "test maybe object and get a pointer to the inner value" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -51,7 +51,6 @@ test "rhs maybe unwrap return" {
test "maybe return" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try maybeReturnImpl();
@@ -140,8 +139,8 @@ test "optional pointer to 0 bit type null value at runtime" {
}
test "if var maybe pointer" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -185,7 +184,6 @@ const here_is_a_null_literal = SillyStruct{ .context = null };
test "unwrap optional which is field of global var" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
struct_with_optional.field = null;
test/behavior/optional.zig
@@ -59,6 +59,7 @@ fn testNullPtrsEql() !void {
}
test "optional with zero-bit type" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -109,7 +110,6 @@ test "optional with zero-bit type" {
}
test "address of unwrap optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -132,7 +132,6 @@ test "address of unwrap optional" {
}
test "nested optional field in struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -210,6 +209,7 @@ test "equality compare optionals and non-optionals" {
}
test "compare optionals with modified payloads" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
var lhs: ?bool = false;
@@ -319,7 +319,7 @@ test "assigning to an unwrapped optional field in an inline loop" {
}
test "coerce an anon struct literal to optional struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -339,7 +339,6 @@ test "coerce an anon struct literal to optional struct" {
}
test "0-bit child type coerced to optional return ptr result location" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -365,6 +364,7 @@ test "0-bit child type coerced to optional return ptr result location" {
}
test "0-bit child type coerced to optional" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -391,7 +391,7 @@ test "0-bit child type coerced to optional" {
}
test "array of optional unaligned types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -428,8 +428,8 @@ test "array of optional unaligned types" {
}
test "optional pointer to zero bit optional payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -447,8 +447,8 @@ test "optional pointer to zero bit optional payload" {
}
test "optional pointer to zero bit error union payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -543,7 +543,6 @@ test "alignment of wrapping an optional payload" {
}
test "Optional slice size is optimized" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -556,7 +555,7 @@ test "Optional slice size is optimized" {
}
test "Optional slice passed to function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -574,7 +573,6 @@ test "Optional slice passed to function" {
}
test "peer type resolution in nested if expressions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
const Thing = struct { n: i32 };
@@ -623,8 +621,8 @@ test "variable of optional of noreturn" {
}
test "copied optional doesn't alias source" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -637,8 +635,8 @@ test "copied optional doesn't alias source" {
}
test "result location initialization of optional with OPV payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
test/behavior/packed-struct.zig
@@ -120,7 +120,6 @@ test "consistent size of packed structs" {
}
test "correct sizeOf and offsets in packed structs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -187,7 +186,6 @@ test "correct sizeOf and offsets in packed structs" {
}
test "nested packed structs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -484,7 +482,6 @@ test "load pointer from packed struct" {
}
test "@intFromPtr on a packed struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -507,7 +504,6 @@ test "@intFromPtr on a packed struct field" {
}
test "@intFromPtr on a packed struct field unaligned and nested" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -617,6 +613,7 @@ test "@intFromPtr on a packed struct field unaligned and nested" {
}
test "packed struct fields modification" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
// Originally reported at https://github.com/ziglang/zig/issues/16615
@@ -656,9 +653,9 @@ test "optional pointer in packed struct" {
}
test "nested packed struct field access test" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO packed structs larger than 64 bits
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -777,6 +774,7 @@ test "nested packed struct field access test" {
}
test "nested packed struct at non-zero offset" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -915,7 +913,6 @@ test "packed struct passed to callconv(.c) function" {
}
test "overaligned pointer to packed struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -937,7 +934,7 @@ test "overaligned pointer to packed struct" {
}
test "packed struct initialized in bitcast" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -986,8 +983,8 @@ test "store undefined to packed result location" {
try expectEqual(x, s.x);
}
+// Originally reported at https://github.com/ziglang/zig/issues/9914
test "bitcast back and forth" {
- // Originally reported at https://github.com/ziglang/zig/issues/9914
const S = packed struct { one: u6, two: u1 };
const s = S{ .one = 0b110101, .two = 0b1 };
const u: u7 = @bitCast(s);
@@ -996,8 +993,9 @@ test "bitcast back and forth" {
try expect(s.two == s2.two);
}
+// Originally reported at https://github.com/ziglang/zig/issues/14200
test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
- // Originally reported at https://github.com/ziglang/zig/issues/14200
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1015,8 +1013,8 @@ test "field access of packed struct smaller than its abi size inside struct init
try expect(@as(i2, 1) == s.ps.y);
}
+// Originally reported at https://github.com/ziglang/zig/issues/14632
test "modify nested packed struct aligned field" {
- // Originally reported at https://github.com/ziglang/zig/issues/14632
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
@@ -1045,10 +1043,10 @@ test "modify nested packed struct aligned field" {
try std.testing.expect(!opts.baz);
}
+// Originally reported at https://github.com/ziglang/zig/issues/9674
test "assigning packed struct inside another packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- // Originally reported at https://github.com/ziglang/zig/issues/9674
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
@@ -1078,7 +1076,6 @@ test "assigning packed struct inside another packed struct" {
}
test "packed struct used as part of anon decl name" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1104,7 +1101,13 @@ test "packed struct acts as a namespace" {
}
test "pointer loaded correctly from packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+
+ if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // crashes MSVC
const RAM = struct {
data: [0xFFFF + 1]u8,
@@ -1132,12 +1135,6 @@ test "pointer loaded correctly from packed struct" {
}
}
};
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-
- if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // crashes MSVC
var ram = try RAM.new();
var cpu = try CPU.new(&ram);
@@ -1146,7 +1143,7 @@ test "pointer loaded correctly from packed struct" {
}
test "assignment to non-byte-aligned field in packed struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1167,7 +1164,6 @@ test "assignment to non-byte-aligned field in packed struct" {
}
test "packed struct field pointer aligned properly" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1186,7 +1182,7 @@ test "packed struct field pointer aligned properly" {
}
test "load flag from packed struct in union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1311,6 +1307,7 @@ test "packed struct equality" {
}
test "packed struct equality ignores padding bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1322,6 +1319,8 @@ test "packed struct equality ignores padding bits" {
}
test "packed struct with signed field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
var s: packed struct {
a: i2,
b: u6,
@@ -1332,6 +1331,7 @@ test "packed struct with signed field" {
}
test "assign packed struct initialized with RLS to packed struct literal field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch.isWasm()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1349,6 +1349,7 @@ test "assign packed struct initialized with RLS to packed struct literal field"
}
test "byte-aligned packed relocation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1363,6 +1364,7 @@ test "byte-aligned packed relocation" {
}
test "packed struct store of comparison result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
test/behavior/packed-union.zig
@@ -99,10 +99,10 @@ fn testFlagsInPackedUnionAtOffset() !void {
try expectEqual(false, test_bits.adv_flags.adv.flags.enable_2);
}
+// Originally reported at https://github.com/ziglang/zig/issues/16581
test "packed union in packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
-
- // Originally reported at https://github.com/ziglang/zig/issues/16581
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
try testPackedUnionInPackedStruct();
@@ -136,7 +136,7 @@ fn testPackedUnionInPackedStruct() !void {
}
test "packed union initialized with a runtime value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/packed_struct_explicit_backing_int.zig
@@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual;
const native_endian = builtin.cpu.arch.endian();
test "packed struct explicit backing integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/pointers.zig
@@ -18,7 +18,6 @@ fn testDerefPtr() !void {
}
test "pointer-integer arithmetic" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -232,7 +231,6 @@ test "peer type resolution with C pointer and const pointer" {
test "implicit casting between C pointer and optional non-C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -248,8 +246,8 @@ test "implicit casting between C pointer and optional non-C pointer" {
}
test "implicit cast error unions with non-optional to optional pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -298,7 +296,6 @@ test "allowzero pointer and slice" {
test "assign null directly to C pointer and test null equality" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -366,7 +363,6 @@ test "array initialization types" {
}
test "null terminated pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -384,7 +380,6 @@ test "null terminated pointer" {
}
test "allow any sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -401,7 +396,6 @@ test "allow any sentinel" {
}
test "pointer sentinel with enums" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -515,7 +509,6 @@ test "@intFromPtr on null optional at comptime" {
}
test "indexing array with sentinel returns correct type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -525,7 +518,6 @@ test "indexing array with sentinel returns correct type" {
}
test "element pointer to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -548,7 +540,6 @@ test "element pointer to slice" {
}
test "element pointer arithmetic to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -604,7 +595,6 @@ test "pointer to constant decl preserves alignment" {
test "ptrCast comptime known slice to C pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -625,7 +615,6 @@ test "pointer alignment and element type include call expression" {
}
test "pointer to array has explicit alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const S = struct {
test/behavior/popcount.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "@popCount integers" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -15,7 +14,7 @@ test "@popCount integers" {
}
test "@popCount 128bit integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -77,8 +76,8 @@ fn testPopCountIntegers() !void {
}
test "@popCount vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/ptrcast.zig
@@ -22,7 +22,6 @@ fn testReinterpretBytesAsInteger() !void {
test "reinterpret an array over multiple elements, with no well-defined layout" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -56,7 +55,6 @@ fn testReinterpretStructWrappedBytesAsInteger() !void {
}
test "reinterpret bytes of an array into an extern struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -130,7 +128,6 @@ fn testReinterpretOverAlignedExternStructAsExternStruct() !void {
test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Test lowering a field ptr
@@ -152,7 +149,6 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
test "lower reinterpreted comptime field ptr" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// Test lowering a field ptr
@@ -174,7 +170,6 @@ test "lower reinterpreted comptime field ptr" {
test "reinterpret struct field at comptime" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const numNative = comptime Bytes.init(0x12345678);
@@ -232,7 +227,6 @@ test "ptrcast of const integer has the correct object size" {
test "implicit optional pointer to optional anyopaque pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var buf: [4]u8 = "aoeu".*;
@@ -244,7 +238,6 @@ test "implicit optional pointer to optional anyopaque pointer" {
test "@ptrCast slice to slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -262,7 +255,6 @@ test "@ptrCast slice to slice" {
test "comptime @ptrCast a subset of an array, then write through it" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -354,7 +346,6 @@ test "@ptrCast restructures sliced comptime-only array" {
test "@ptrCast slice multiplying length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -372,7 +363,6 @@ test "@ptrCast slice multiplying length" {
test "@ptrCast array pointer to slice multiplying length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -390,7 +380,6 @@ test "@ptrCast array pointer to slice multiplying length" {
test "@ptrCast slice dividing length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -408,7 +397,6 @@ test "@ptrCast slice dividing length" {
test "@ptrCast array pointer to slice dividing length" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -426,7 +414,6 @@ test "@ptrCast array pointer to slice dividing length" {
test "@ptrCast slice with complex length increase" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -447,7 +434,6 @@ test "@ptrCast slice with complex length increase" {
test "@ptrCast array pointer to slice with complex length increase" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -468,7 +454,6 @@ test "@ptrCast array pointer to slice with complex length increase" {
test "@ptrCast slice with complex length decrease" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -489,7 +474,6 @@ test "@ptrCast slice with complex length decrease" {
test "@ptrCast array pointer to slice with complex length decrease" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -510,7 +494,6 @@ test "@ptrCast array pointer to slice with complex length decrease" {
test "@ptrCast slice of zero-bit type to different slice" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -530,7 +513,6 @@ test "@ptrCast slice of zero-bit type to different slice" {
test "@ptrCast single-item pointer to slice with length 1" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -552,7 +534,6 @@ test "@ptrCast single-item pointer to slice with length 1" {
test "@ptrCast single-item pointer to slice of bytes" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
test/behavior/ptrfromint.zig
@@ -17,7 +17,6 @@ fn addressToFunction() void {
test "mutate through ptr initialized with constant ptrFromInt value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -35,7 +34,6 @@ fn forceCompilerAnalyzeBranchHardCodedPtrDereference(x: bool) void {
test "@ptrFromInt creates null pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -45,7 +43,6 @@ test "@ptrFromInt creates null pointer" {
test "@ptrFromInt creates allowzero zero pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const ptr = @as(*allowzero u32, @ptrFromInt(0));
test/behavior/ref_var_in_if_after_if_2nd_switch_prong.zig
@@ -6,7 +6,6 @@ const mem = std.mem;
var ok: bool = false;
test "reference a variable in an if after an if in the 2nd switch prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/reflection.zig
@@ -26,7 +26,6 @@ fn dummy(a: bool, b: i32, c: f32) i32 {
}
test "reflection: @field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var f = Foo{
test/behavior/return_address.zig
@@ -6,7 +6,7 @@ fn retAddr() usize {
}
test "return address" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/saturating_arithmetic.zig
@@ -5,7 +5,7 @@ const maxInt = std.math.maxInt;
const expect = std.testing.expect;
test "saturating add" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -52,8 +52,8 @@ test "saturating add" {
}
test "saturating add 128bit" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -79,7 +79,7 @@ test "saturating add 128bit" {
}
test "saturating subtraction" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -125,8 +125,8 @@ test "saturating subtraction" {
}
test "saturating subtraction 128bit" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -158,7 +158,6 @@ fn testSatMul(comptime T: type, a: T, b: T, expected: T) !void {
}
test "saturating multiplication <= 32 bits" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -229,6 +228,7 @@ test "saturating multiplication <= 32 bits" {
}
test "saturating mul i64, i128" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -256,8 +256,8 @@ test "saturating mul i64, i128" {
}
test "saturating multiplication" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -295,7 +295,7 @@ test "saturating multiplication" {
}
test "saturating shift-left" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -340,6 +340,7 @@ test "saturating shift-left" {
}
test "saturating shift-left large rhs" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -357,7 +358,7 @@ test "saturating shift-left large rhs" {
}
test "saturating shl uses the LHS type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/select.zig
@@ -4,9 +4,9 @@ const mem = std.mem;
const expect = std.testing.expect;
test "@select vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -35,9 +35,9 @@ fn selectVectors() !void {
}
test "@select arrays" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -66,6 +66,7 @@ fn selectArrays() !void {
}
test "@select compare result" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .hexagon) return error.SkipZigTest;
test/behavior/shuffle.zig
@@ -5,7 +5,7 @@ const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
test "@shuffle int" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -50,8 +50,8 @@ test "@shuffle int" {
}
test "@shuffle int strange sizes" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -132,8 +132,8 @@ fn testShuffle(
}
test "@shuffle bool 1" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -155,8 +155,8 @@ test "@shuffle bool 1" {
}
test "@shuffle bool 2" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/sizeof_and_typeof.zig
@@ -270,7 +270,6 @@ test "bitSizeOf comptime_int" {
}
test "runtime instructions inside typeof in comptime only scope" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -326,7 +325,6 @@ test "lazy abi size used in comparison" {
}
test "peer type resolution with @TypeOf doesn't trigger dependency loop check" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -437,7 +435,6 @@ test "Peer resolution of extern function calls in @TypeOf" {
}
test "Extern function calls, dereferences and field access in @TypeOf" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Test = struct {
test/behavior/slice.zig
@@ -211,7 +211,6 @@ test "comptime pointer cast array and then slice" {
}
test "slicing zero length array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -273,7 +272,6 @@ test "result location zero sized array inside struct field implicit cast to slic
}
test "runtime safety lets us slice from len..len" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -287,7 +285,6 @@ fn sliceFromLenToLen(a_slice: []u8, start: usize, end: usize) []u8 {
}
test "C pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -299,7 +296,6 @@ test "C pointer" {
}
test "C pointer slice access" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -331,7 +327,6 @@ fn sliceSum(comptime q: []const u8) i32 {
}
test "slice type with custom alignment" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -390,7 +385,6 @@ test "empty array to slice" {
}
test "@ptrCast slice to pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -445,7 +439,6 @@ test "slice multi-pointer without end" {
}
test "slice syntax resulting in pointer-to-array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -665,7 +658,6 @@ test "slice syntax resulting in pointer-to-array" {
}
test "slice pointer-to-array null terminated" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -718,7 +710,7 @@ test "slice pointer-to-array zero length" {
}
test "type coercion of pointer to anon struct literal to pointer to slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -812,7 +804,6 @@ test "slice sentinel access at comptime" {
}
test "slicing array with sentinel as end index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -831,7 +822,6 @@ test "slicing array with sentinel as end index" {
}
test "slicing slice with sentinel as end index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -888,7 +878,7 @@ test "slice field ptr var" {
}
test "global slice field access" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -929,7 +919,6 @@ test "slice with dereferenced value" {
}
test "empty slice ptr is non null" {
- if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // Test assumes `undefined` is non-zero
{
@@ -947,7 +936,6 @@ test "empty slice ptr is non null" {
}
test "slice decays to many pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -957,7 +945,6 @@ test "slice decays to many pointer" {
}
test "write through pointer to optional slice arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -977,7 +964,6 @@ test "write through pointer to optional slice arg" {
}
test "modify slice length at comptime" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -994,7 +980,6 @@ test "modify slice length at comptime" {
}
test "slicing zero length array field of struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1010,7 +995,6 @@ test "slicing zero length array field of struct" {
}
test "slicing slices gives correct result" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1024,7 +1008,6 @@ test "slicing slices gives correct result" {
}
test "get address of element of zero-sized slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1037,7 +1020,6 @@ test "get address of element of zero-sized slice" {
}
test "sentinel-terminated 0-length slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1058,8 +1040,6 @@ test "sentinel-terminated 0-length slices" {
}
test "peer slices keep abi alignment with empty struct" {
- if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
-
var cond: bool = undefined;
cond = false;
const slice = if (cond) &[1]u32{42} else &.{};
test/behavior/src.zig
@@ -16,7 +16,6 @@ const expect = std.testing.expect;
const expectEqualStrings = std.testing.expectEqualStrings;
test "@src" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/string_literals.zig
@@ -6,7 +6,6 @@ const tag_name = @tagName(TestEnum.TestEnumValue);
const ptr_tag_name: [*:0]const u8 = tag_name;
test "@tagName() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -20,7 +19,6 @@ const error_name = @errorName(TestError.TestErrorCode);
const ptr_error_name: [*:0]const u8 = error_name;
test "@errorName() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -34,7 +32,6 @@ const type_name = @typeName(TestType);
const ptr_type_name: [*:0]const u8 = type_name;
test "@typeName() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -48,7 +45,6 @@ const ptr_actual_contents: [*:0]const u8 = actual_contents;
const expected_contents = "hello zig\n";
test "@embedFile() returns a string literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -64,7 +60,7 @@ fn testFnForSrc() std.builtin.SourceLocation {
}
test "@src() returns a struct containing 0-terminated string slices" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/struct.zig
@@ -10,7 +10,6 @@ const maxInt = std.math.maxInt;
top_level_field: i32,
test "top level fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var instance = @This(){
@@ -87,7 +86,6 @@ const StructFoo = struct {
};
test "structs" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -200,7 +198,6 @@ const MemberFnRand = struct {
};
test "return struct byval from function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Bar = struct {
@@ -237,7 +234,6 @@ test "call method with mutable reference to struct with no fields" {
}
test "struct field init with catch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -280,7 +276,6 @@ const Val = struct {
};
test "struct point to self" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -297,7 +292,6 @@ test "struct point to self" {
}
test "void struct fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const foo = VoidStructFieldsFoo{
@@ -335,7 +329,6 @@ fn testPassSliceOfEmptyStructToFn(slice: []const EmptyStruct2) usize {
}
test "self-referencing struct via array member" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -492,7 +485,7 @@ const Bitfields = packed struct {
};
test "packed struct fields are ordered from LSB to MSB" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -610,7 +603,6 @@ fn getC(data: *const BitField1) u2 {
}
test "default struct initialization fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -634,8 +626,8 @@ test "default struct initialization fields" {
}
test "packed array 24bits" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -701,8 +693,8 @@ const FooArrayOfAligned = packed struct {
};
test "pointer to packed struct member in a stack variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -731,7 +723,6 @@ test "packed struct with u0 field access" {
}
test "access to global struct fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
g_foo.bar.value = 42;
@@ -753,8 +744,8 @@ const S0 = struct {
var g_foo: S0 = S0.init();
test "packed struct with fp fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -783,7 +774,6 @@ test "packed struct with fp fields" {
test "fn with C calling convention returns struct by value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -807,7 +797,7 @@ test "fn with C calling convention returns struct by value" {
}
test "non-packed struct with u128 entry in union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -834,8 +824,8 @@ test "non-packed struct with u128 entry in union" {
}
test "packed struct field passed to generic function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -859,7 +849,6 @@ test "packed struct field passed to generic function" {
}
test "anonymous struct literal syntax" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -951,7 +940,6 @@ test "comptime struct field" {
test "tuple element initialized with fn call" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -968,8 +956,8 @@ test "tuple element initialized with fn call" {
}
test "struct with union field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -990,7 +978,6 @@ test "struct with union field" {
}
test "struct with 0-length union array field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1039,6 +1026,7 @@ test "packed struct with undefined initializers" {
}
test "for loop over pointers to struct, getting field from struct pointer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1078,7 +1066,7 @@ test "for loop over pointers to struct, getting field from struct pointer" {
}
test "anon init through error unions and optionals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1105,7 +1093,7 @@ test "anon init through error unions and optionals" {
}
test "anon init through optional" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1125,7 +1113,7 @@ test "anon init through optional" {
}
test "anon init through error union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1145,7 +1133,7 @@ test "anon init through error union" {
}
test "typed init through error unions and optionals" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1171,7 +1159,6 @@ test "typed init through error unions and optionals" {
}
test "initialize struct with empty literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct { x: i32 = 1234 };
@@ -1206,7 +1193,7 @@ test "loading a struct pointer perfoms a copy" {
}
test "packed struct aggregate init" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1226,7 +1213,7 @@ test "packed struct aggregate init" {
}
test "packed struct field access via pointer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1267,7 +1254,6 @@ test "store to comptime field" {
}
test "struct field init value is size of the struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const namespace = struct {
@@ -1282,7 +1268,6 @@ test "struct field init value is size of the struct" {
}
test "under-aligned struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1306,7 +1291,6 @@ test "under-aligned struct field" {
}
test "fieldParentPtr of a zero-bit field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1357,7 +1341,6 @@ test "fieldParentPtr of a zero-bit field" {
test "struct field has a pointer to an aligned version of itself" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const E = struct {
next: *align(1) @This(),
@@ -1437,7 +1420,6 @@ test "discarded struct initialization works as expected" {
}
test "function pointer in struct returns the struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const A = struct {
@@ -1455,7 +1437,6 @@ test "function pointer in struct returns the struct" {
test "no dependency loop on optional field wrapped in generic function" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const S = struct {
fn Atomic(comptime T: type) type {
@@ -1473,7 +1454,6 @@ test "no dependency loop on optional field wrapped in generic function" {
}
test "optional field init with tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const S = struct {
@@ -1488,8 +1468,6 @@ test "optional field init with tuple" {
}
test "if inside struct init inside if" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const MyStruct = struct { x: u32 };
const b: u32 = 5;
var i: u32 = 1;
@@ -1580,7 +1558,6 @@ test "instantiate struct with comptime field" {
test "struct field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1610,7 +1587,6 @@ test "struct field pointer has correct alignment" {
test "extern struct field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1828,7 +1804,6 @@ test "tuple with comptime-only field" {
}
test "extern struct fields are aligned to 1" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Foo = extern struct {
@@ -1845,7 +1820,7 @@ test "extern struct fields are aligned to 1" {
}
test "assign to slice.len of global variable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1897,7 +1872,6 @@ test "array of structs inside struct initialized with undefined" {
}
test "runtime call in nested initializer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1929,7 +1903,6 @@ test "runtime call in nested initializer" {
}
test "runtime value in nested initializer passed as pointer to function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const Bar = struct {
@@ -1953,7 +1926,7 @@ test "runtime value in nested initializer passed as pointer to function" {
}
test "struct field default value is a call" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2001,7 +1974,6 @@ test "aggregate initializers should allow initializing comptime fields, verifyin
test "assignment of field with padding" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Mesh = extern struct {
@@ -2031,7 +2003,6 @@ test "assignment of field with padding" {
test "initiate global variable with runtime value" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -2126,6 +2097,7 @@ test "anonymous struct equivalence" {
}
test "field access through mem ptr arg" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
const S = struct {
test/behavior/struct_contains_null_ptr_itself.zig
@@ -3,7 +3,6 @@ const expect = std.testing.expect;
const builtin = @import("builtin");
test "struct contains null pointer which contains original struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/struct_contains_slice_of_itself.zig
@@ -12,6 +12,7 @@ const NodeAligned = struct {
};
test "struct contains slice of itself" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -52,7 +53,7 @@ test "struct contains slice of itself" {
}
test "struct contains aligned slice of itself" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/switch.zig
@@ -8,6 +8,7 @@ const minInt = std.math.minInt;
const maxInt = std.math.maxInt;
test "switch with numbers" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -43,7 +44,7 @@ fn testSwitchWithAllRanges(x: u32, y: u32) u32 {
}
test "switch arbitrary int size" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -274,8 +275,8 @@ const SwitchProngWithVarEnum = union(enum) {
};
test "switch prong with variable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -299,8 +300,8 @@ fn switchProngWithVarFn(a: SwitchProngWithVarEnum) !void {
}
test "switch on enum using pointer capture" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testSwitchEnumPtrCapture();
@@ -360,8 +361,8 @@ fn testSwitchHandleAllCasesRange(x: u8) u8 {
}
test "switch on union with some prongs capturing" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const X = union(enum) {
@@ -398,7 +399,6 @@ test "switch on const enum with var" {
}
test "anon enum literal used in switch on union enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Foo = union(enum) {
@@ -469,8 +469,8 @@ test "switch on integer with else capturing expr" {
}
test "else prong of switch on error set excludes other cases" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -505,8 +505,8 @@ test "else prong of switch on error set excludes other cases" {
}
test "switch prongs with error set cases make a new error set type for capture value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -563,7 +563,6 @@ test "return result loc and then switch with range implicit casted to error unio
test "switch with null and T peer types and inferred result location type" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -582,7 +581,7 @@ test "switch with null and T peer types and inferred result location type" {
}
test "switch prongs with cases with identical payload types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -689,7 +688,7 @@ test "switch prong pointer capture alignment" {
}
test "switch on pointer type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -737,8 +736,8 @@ test "switch on error set with single else" {
}
test "switch capture copies its payload" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -831,6 +830,7 @@ test "comptime inline switch" {
}
test "switch capture peer type resolution" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = union(enum) {
@@ -848,6 +848,8 @@ test "switch capture peer type resolution" {
}
test "switch capture peer type resolution for in-memory coercible payloads" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const T1 = c_int;
const T2 = @Type(@typeInfo(T1));
@@ -868,6 +870,8 @@ test "switch capture peer type resolution for in-memory coercible payloads" {
}
test "switch pointer capture peer type resolution" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const T1 = c_int;
const T2 = @Type(@typeInfo(T1));
@@ -904,6 +908,7 @@ test "inline switch range that includes the maximum value of the switched type"
}
test "nested break ignores switch conditions and breaks instead" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -926,6 +931,7 @@ test "nested break ignores switch conditions and breaks instead" {
}
test "peer type resolution on switch captures ignores unused payload bits" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -951,7 +957,6 @@ test "peer type resolution on switch captures ignores unused payload bits" {
}
test "switch prong captures range" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -971,6 +976,8 @@ test "switch prong captures range" {
}
test "prong with inline call to unreachable" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const U = union(enum) {
void: void,
bool: bool,
@@ -1042,7 +1049,7 @@ test "labeled switch with break" {
}
test "unlabeled break ignores switch" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
test/behavior/switch_loop.zig
@@ -3,7 +3,7 @@ const std = @import("std");
const expect = std.testing.expect;
test "simple switch loop" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -27,7 +27,7 @@ test "simple switch loop" {
}
test "switch loop with ranges" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -48,7 +48,7 @@ test "switch loop with ranges" {
}
test "switch loop on enum" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -72,7 +72,7 @@ test "switch loop on enum" {
}
test "switch loop with error set" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -96,7 +96,7 @@ test "switch loop with error set" {
}
test "switch loop on tagged union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -129,7 +129,7 @@ test "switch loop on tagged union" {
}
test "switch loop dispatching instructions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -179,7 +179,7 @@ test "switch loop dispatching instructions" {
}
test "switch loop with pointer capture" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -218,8 +218,6 @@ test "switch loop with pointer capture" {
}
test "unanalyzed continue with operand" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
@setRuntimeSafety(false);
label: switch (false) {
false => if (false) continue :label true,
test/behavior/switch_on_captured_error.zig
@@ -6,6 +6,7 @@ const expectEqual = std.testing.expectEqual;
const builtin = @import("builtin");
test "switch on error union catch capture" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -300,6 +301,7 @@ test "switch on error union catch capture" {
}
test "switch on error union if else capture" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/switch_prong_err_enum.zig
@@ -21,8 +21,8 @@ fn doThing(form_id: u64) anyerror!FormValue {
}
test "switch prong returns error enum" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/switch_prong_implicit_cast.zig
@@ -15,8 +15,8 @@ fn foo(id: u64) !FormValue {
}
test "switch prong implicit cast" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = switch (foo(2) catch unreachable) {
test/behavior/this.zig
@@ -26,7 +26,6 @@ test "this refer to module call private fn" {
}
test "this refer to container" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var pt: Point(i32) = undefined;
@@ -47,7 +46,6 @@ fn prev(p: ?State) void {
}
test "this used as optional function parameter" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/threadlocal.zig
@@ -4,7 +4,6 @@ const expect = std.testing.expect;
test "thread local variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -25,7 +24,6 @@ test "thread local variable" {
test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -40,7 +38,6 @@ threadlocal var buffer: [11]u8 = undefined;
test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/truncate.zig
@@ -65,8 +65,8 @@ test "truncate on comptime integer" {
}
test "truncate on vectors" {
- if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/try.zig
@@ -47,7 +47,7 @@ test "try then not executed with assignment" {
}
test "`try`ing an if/else expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -68,7 +68,6 @@ test "`try`ing an if/else expression" {
}
test "'return try' of empty error set in function returning non-error" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/tuple.zig
@@ -8,7 +8,6 @@ const expectEqual = std.testing.expectEqual;
test "tuple concatenation" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -51,7 +50,6 @@ test "tuple multiplication" {
}
test "more tuple concatenation" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -129,7 +127,6 @@ test "tuple initializer for var" {
}
test "array-like initializer for tuple types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -216,7 +213,6 @@ test "initializing anon struct with explicit type" {
}
test "fieldParentPtr of tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -229,7 +225,6 @@ test "fieldParentPtr of tuple" {
}
test "fieldParentPtr of anon struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -256,7 +251,6 @@ test "offsetOf anon struct" {
}
test "initializing tuple with mixed comptime-runtime fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var x: u32 = 15;
@@ -268,7 +262,6 @@ test "initializing tuple with mixed comptime-runtime fields" {
}
test "initializing anon struct with mixed comptime-runtime fields" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
var x: u32 = 15;
@@ -280,7 +273,6 @@ test "initializing anon struct with mixed comptime-runtime fields" {
}
test "tuple in tuple passed to generic function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -300,7 +292,6 @@ test "tuple in tuple passed to generic function" {
}
test "coerce tuple to tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -315,7 +306,6 @@ test "coerce tuple to tuple" {
}
test "tuple type with void field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -353,7 +343,6 @@ test "zero sized struct in tuple handled correctly" {
}
test "tuple type with void field and a runtime field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -364,7 +353,6 @@ test "tuple type with void field and a runtime field" {
}
test "branching inside tuple literal" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -410,7 +398,6 @@ test "tuple of struct concatenation and coercion to array" {
}
test "nested runtime conditionals in tuple initializer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -446,7 +433,6 @@ test "sentinel slice in tuple" {
}
test "tuple pointer is indexable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -470,7 +456,6 @@ test "tuple pointer is indexable" {
}
test "coerce anon tuple to tuple" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -496,14 +481,12 @@ test "empty tuple type" {
}
test "tuple with comptime fields with non empty initializer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-
const a: struct { comptime comptime_int = 0 } = .{0};
_ = a;
}
test "tuple with runtime value coerced into a slice with a sentinel" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -578,7 +561,6 @@ test "comptime fields in tuple can be initialized" {
test "empty struct in tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -591,7 +573,6 @@ test "empty struct in tuple" {
test "empty union in tuple" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -604,6 +585,7 @@ test "empty union in tuple" {
test "field pointer of underaligned tuple" {
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
+
const S = struct {
fn doTheTest() !void {
const T = struct { u8, u32 };
test/behavior/tuple_declarations.zig
@@ -5,7 +5,6 @@ const expect = testing.expect;
const expectEqualStrings = testing.expectEqualStrings;
test "tuple declaration type info" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -34,7 +33,6 @@ test "tuple declaration type info" {
}
test "tuple declaration usage" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/type.zig
@@ -200,8 +200,8 @@ test "Type.ErrorUnion" {
}
test "Type.Opaque" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -258,8 +258,8 @@ test "Type.ErrorSet" {
}
test "Type.Struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -348,7 +348,6 @@ test "Type.Struct" {
test "Type.Enum" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const Foo = @Type(.{
@@ -409,8 +408,8 @@ test "Type.Enum" {
}
test "Type.Union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const Untagged = @Type(.{
@@ -547,7 +546,6 @@ test "Type.Union from empty Type.Enum" {
test "Type.Fn" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const some_opaque = opaque {};
const some_ptr = *some_opaque;
@@ -724,7 +722,6 @@ test "@Type should resolve its children types" {
}
test "struct field names sliced at comptime from larger string" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const text =
test/behavior/type_info.zig
@@ -158,7 +158,6 @@ fn testArray() !void {
}
test "type info: error set, error union info, anyerror" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -190,7 +189,6 @@ fn testErrorSet() !void {
}
test "type info: error set single value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -204,7 +202,6 @@ test "type info: error set single value" {
}
test "type info: error set merged" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -221,7 +218,6 @@ test "type info: error set merged" {
test "type info: enum info" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -362,6 +358,8 @@ test "type info: function type info" {
}
fn testFunction() !void {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const S = struct {
export fn typeInfoFoo() callconv(.c) usize {
unreachable;
test/behavior/typename.zig
@@ -12,7 +12,6 @@ const expectStringStartsWith = std.testing.expectStringStartsWith;
// failures.
test "anon fn param" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -38,7 +37,6 @@ test "anon fn param" {
}
test "anon field init" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -64,7 +62,6 @@ test "anon field init" {
}
test "basic" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -86,7 +83,6 @@ test "basic" {
}
test "top level decl" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -136,7 +132,6 @@ const B = struct {
};
test "fn param" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -216,7 +211,6 @@ pub fn expectEqualStringsIgnoreDigits(expected: []const u8, actual: []const u8)
}
test "local variable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -235,7 +229,6 @@ test "local variable" {
}
test "comptime parameters not converted to anytype in function type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -245,7 +238,6 @@ test "comptime parameters not converted to anytype in function type" {
}
test "anon name strategy used in sub expression" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/undefined.zig
@@ -46,7 +46,6 @@ fn setFooX(foo: *Foo) void {
test "assign undefined to struct" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -63,7 +62,6 @@ test "assign undefined to struct" {
test "assign undefined to struct with method" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
comptime {
@@ -89,7 +87,6 @@ test "type name of undefined" {
var buf: []u8 = undefined;
test "reslice of undefined global var slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -100,7 +97,6 @@ test "reslice of undefined global var slice" {
}
test "returned undef is 0xaa bytes when runtime safety is enabled" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
test/behavior/union.zig
@@ -12,8 +12,8 @@ const FooWithFloats = union {
};
test "basic unions with floats" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -29,8 +29,8 @@ fn setFloat(foo: *FooWithFloats, x: f64) void {
}
test "init union with runtime value - floats" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -60,8 +60,8 @@ const Foo = union {
};
test "init union with runtime value" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -160,8 +160,8 @@ test "unions embedded in aggregate types" {
}
test "constant tagged union with payload" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var empty = TaggedUnionWithPayload{ .Empty = {} };
@@ -210,8 +210,8 @@ const Payload = union(Letter) {
};
test "union with specified enum tag" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -221,8 +221,8 @@ test "union with specified enum tag" {
}
test "packed union generates correctly aligned type" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -263,8 +263,8 @@ fn testComparison() !void {
}
test "comparison between union and enum literal" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -279,8 +279,8 @@ const TheUnion = union(TheTag) {
C: i32,
};
test "cast union to tag type of union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try testCastUnionToTag();
@@ -300,8 +300,8 @@ test "union field access gives the enum values" {
}
test "cast tag type of union to union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: Value2 = Letter2.B;
@@ -316,8 +316,8 @@ const Value2 = union(Letter2) {
};
test "implicit cast union to its tag type" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: Value2 = Letter2.B;
@@ -337,8 +337,8 @@ pub const PackThis = union(enum) {
};
test "constant packed union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -357,7 +357,6 @@ const MultipleChoice = union(enum(u32)) {
};
test "simple union(enum(u32))" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x = MultipleChoice.C;
@@ -403,7 +402,6 @@ test "assigning to union with zero size field" {
test "tagged union initialization with runtime void" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(testTaggedUnionInit({}));
@@ -423,7 +421,6 @@ pub const UnionEnumNoPayloads = union(enum) { A, B };
test "tagged union with no payloads" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const a = UnionEnumNoPayloads{ .B = {} };
@@ -470,7 +467,6 @@ var glbl: Foo1 = undefined;
test "global union with single field is correctly initialized" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
glbl = Foo1{
@@ -487,8 +483,8 @@ pub const FooUnion = union(enum) {
var glbl_array: [2]FooUnion = undefined;
test "initialize global array of union" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -499,8 +495,8 @@ test "initialize global array of union" {
}
test "update the tag value for zero-sized unions" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = union(enum) {
@@ -515,7 +511,6 @@ test "update the tag value for zero-sized unions" {
test "union initializer generates padding only if needed" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const U = union(enum) {
@@ -528,7 +523,6 @@ test "union initializer generates padding only if needed" {
}
test "runtime tag name with single field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -543,7 +537,6 @@ test "runtime tag name with single field" {
test "method call on an empty union" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -604,8 +597,8 @@ test "tagged union type" {
}
test "tagged union as return value" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -621,8 +614,8 @@ fn returnAnInt(x: i32) TaggedFoo {
}
test "tagged union with all void fields but a meaningful tag" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -649,8 +642,8 @@ test "tagged union with all void fields but a meaningful tag" {
}
test "union(enum(u32)) with specified and unspecified tag values" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -687,7 +680,6 @@ fn testEnumWithSpecifiedAndUnspecifiedTagValues(x: MultipleChoice2) !void {
}
test "switch on union with only 1 field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -742,8 +734,8 @@ test "union with only 1 field casted to its enum type which has enum value speci
}
test "@intFromEnum works on unions" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const Bar = union(enum) {
@@ -801,8 +793,8 @@ fn Setter(comptime attr: Attribute) type {
}
test "return union init with void payload" {
- if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -825,7 +817,7 @@ test "return union init with void payload" {
}
test "@unionInit stored to a const" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -856,7 +848,7 @@ test "@unionInit stored to a const" {
}
test "@unionInit can modify a union type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -879,7 +871,7 @@ test "@unionInit can modify a union type" {
}
test "@unionInit can modify a pointer value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -899,7 +891,6 @@ test "@unionInit can modify a pointer value" {
}
test "union no tag with struct member" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -935,8 +926,8 @@ test "extern union doesn't trigger field check at comptime" {
}
test "anonymous union literal syntax" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -964,8 +955,8 @@ test "anonymous union literal syntax" {
}
test "function call result coerces from tagged union to the tag" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -999,8 +990,8 @@ test "function call result coerces from tagged union to the tag" {
}
test "switching on non exhaustive union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1028,7 +1019,6 @@ test "switching on non exhaustive union" {
test "containers with single-field enums" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1057,8 +1047,8 @@ test "containers with single-field enums" {
}
test "@unionInit on union with tag but no fields" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1106,7 +1096,7 @@ test "union enum type gets a separate scope" {
}
test "global variable struct contains union initialized to non-most-aligned field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1133,8 +1123,8 @@ test "global variable struct contains union initialized to non-most-aligned fiel
}
test "union with no result loc initiated with a runtime value" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1151,8 +1141,8 @@ test "union with no result loc initiated with a runtime value" {
}
test "union with a large struct field" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1186,8 +1176,8 @@ test "comptime equality of extern unions with same tag" {
}
test "union tag is set when initiated as a temporary value at runtime" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1206,8 +1196,8 @@ test "union tag is set when initiated as a temporary value at runtime" {
}
test "extern union most-aligned field is smaller" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1226,8 +1216,8 @@ test "extern union most-aligned field is smaller" {
}
test "return an extern union from C calling convention" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1258,7 +1248,7 @@ test "return an extern union from C calling convention" {
}
test "noreturn field in union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1309,7 +1299,7 @@ test "noreturn field in union" {
}
test "@unionInit uses tag value instead of field index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1339,7 +1329,6 @@ test "@unionInit uses tag value instead of field index" {
}
test "union field ptr - zero sized payload" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1354,7 +1343,6 @@ test "union field ptr - zero sized payload" {
}
test "union field ptr - zero sized field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1369,7 +1357,7 @@ test "union field ptr - zero sized field" {
}
test "packed union in packed struct" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1420,8 +1408,8 @@ test "union int tag type is properly managed" {
}
test "no dependency loop when function pointer in union returns the union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1442,7 +1430,7 @@ test "no dependency loop when function pointer in union returns the union" {
}
test "union reassignment can use previous value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1456,7 +1444,7 @@ test "union reassignment can use previous value" {
}
test "packed union with zero-bit field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1475,7 +1463,7 @@ test "packed union with zero-bit field" {
}
test "reinterpreting enum value inside packed union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const U = packed union {
@@ -1493,7 +1481,7 @@ test "reinterpreting enum value inside packed union" {
}
test "access the tag of a global tagged union" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const U = union(enum) {
a,
@@ -1504,7 +1492,7 @@ test "access the tag of a global tagged union" {
}
test "coerce enum literal to union in result loc" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
const U = union(enum) {
a,
@@ -1522,7 +1510,6 @@ test "coerce enum literal to union in result loc" {
test "defined-layout union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1557,7 +1544,6 @@ test "defined-layout union field pointer has correct alignment" {
test "undefined-layout union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -1590,8 +1576,8 @@ test "undefined-layout union field pointer has correct alignment" {
}
test "packed union field pointer has correct alignment" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest; // TODO
@@ -1624,6 +1610,7 @@ test "packed union field pointer has correct alignment" {
}
test "union with 128 bit integer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const ValueTag = enum { int, other };
@@ -1647,6 +1634,7 @@ test "union with 128 bit integer" {
}
test "memset extern union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = extern union {
@@ -1668,6 +1656,7 @@ test "memset extern union" {
}
test "memset packed union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = packed union {
@@ -1768,6 +1757,7 @@ test "reinterpret extern union" {
}
test "reinterpret packed union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = packed union {
@@ -1840,6 +1830,7 @@ test "reinterpret packed union" {
}
test "reinterpret packed union inside packed struct" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1945,6 +1936,8 @@ test "extern union initialized via reintepreted struct field initializer" {
}
test "packed union initialized via reintepreted struct field initializer" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
const U = packed union {
@@ -1963,6 +1956,7 @@ test "packed union initialized via reintepreted struct field initializer" {
}
test "store of comptime reinterpreted memory to extern union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
@@ -1985,6 +1979,8 @@ test "store of comptime reinterpreted memory to extern union" {
}
test "store of comptime reinterpreted memory to packed union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const bytes = [_]u8{ 0xaa, 0xbb, 0xcc, 0xdd };
const U = packed union {
@@ -2005,7 +2001,6 @@ test "store of comptime reinterpreted memory to packed union" {
}
test "union field is a pointer to an aligned version of itself" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2019,6 +2014,7 @@ test "union field is a pointer to an aligned version of itself" {
}
test "pass register-sized field as non-register-sized union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2067,6 +2063,7 @@ test "circular dependency through pointer field of a union" {
}
test "pass nested union with rls" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2088,8 +2085,8 @@ test "pass nested union with rls" {
}
test "runtime union init, most-aligned field != largest" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2114,8 +2111,8 @@ test "runtime union init, most-aligned field != largest" {
}
test "copied union field doesn't alias source" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -2134,7 +2131,7 @@ test "copied union field doesn't alias source" {
}
test "create union(enum) from other union(enum)" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2258,7 +2255,7 @@ test "matching captures causes union equivalence" {
}
test "signed enum tag with negative value" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -2315,6 +2312,7 @@ test "extern union @FieldType" {
}
test "assign global tagged union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const U = union(enum) {
@@ -2336,6 +2334,8 @@ test "assign global tagged union" {
}
test "set mutable union by switching on same union" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+
const U = union(enum) {
foo,
bar: usize,
test/behavior/union_with_members.zig
@@ -17,8 +17,8 @@ const ET = union(enum) {
};
test "enum with members" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/var_args.zig
@@ -28,7 +28,6 @@ test "send void arg to var args" {
}
test "pass args directly" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect(addSomeStuff(.{ @as(i32, 1), @as(i32, 2), @as(i32, 3), @as(i32, 4) }) == 10);
@@ -41,7 +40,6 @@ fn addSomeStuff(args: anytype) i32 {
}
test "runtime parameter before var args" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
try expect((try extraFn(10, .{})) == 0);
@@ -94,13 +92,13 @@ fn doNothingWithFirstArg(args: anytype) void {
}
test "simple variadic function" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -156,13 +154,13 @@ test "simple variadic function" {
}
test "coerce reference to var arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -189,13 +187,13 @@ test "coerce reference to var arg" {
}
test "variadic functions" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -236,12 +234,12 @@ test "variadic functions" {
}
test "copy VaList" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@@ -271,12 +269,12 @@ test "copy VaList" {
}
test "unused VaList arg" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
- if (builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
+ if (builtin.zig_backend == .stage2_llvm and builtin.os.tag != .macos and builtin.cpu.arch.isAARCH64()) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
test/behavior/vector.zig
@@ -8,7 +8,6 @@ const expectEqual = std.testing.expectEqual;
test "implicit cast vector to array - bool" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -25,8 +24,8 @@ test "implicit cast vector to array - bool" {
}
test "vector wrap operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -49,8 +48,8 @@ test "vector wrap operators" {
}
test "vector bin compares with mem.eql" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -74,8 +73,8 @@ test "vector bin compares with mem.eql" {
}
test "vector int operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -97,8 +96,8 @@ test "vector int operators" {
}
test "vector float operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -141,8 +140,8 @@ test "vector float operators" {
}
test "vector bit operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -173,7 +172,7 @@ test "vector bit operators" {
}
test "implicit cast vector to array" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -193,7 +192,7 @@ test "implicit cast vector to array" {
}
test "array to vector" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -212,7 +211,7 @@ test "array to vector" {
}
test "array vector coercion - odd sizes" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
@@ -251,7 +250,7 @@ test "array vector coercion - odd sizes" {
}
test "array to vector with element type coercion" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -273,7 +272,6 @@ test "array to vector with element type coercion" {
test "peer type resolution with coercible element types" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -291,8 +289,8 @@ test "peer type resolution with coercible element types" {
}
test "tuple to vector" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -314,8 +312,8 @@ test "tuple to vector" {
}
test "vector casts of sizes not divisible by 8" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -354,7 +352,7 @@ test "vector casts of sizes not divisible by 8" {
}
test "vector @splat" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -395,7 +393,7 @@ test "vector @splat" {
}
test "load vector elements via comptime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -416,7 +414,7 @@ test "load vector elements via comptime index" {
}
test "store vector elements via comptime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -443,7 +441,6 @@ test "store vector elements via comptime index" {
}
test "load vector elements via runtime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -465,7 +462,7 @@ test "load vector elements via runtime index" {
}
test "store vector elements via runtime index" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -487,7 +484,6 @@ test "store vector elements via runtime index" {
}
test "initialize vector which is a struct field" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -508,8 +504,8 @@ test "initialize vector which is a struct field" {
}
test "vector comparison operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -554,8 +550,8 @@ test "vector comparison operators" {
}
test "vector division operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -647,9 +643,9 @@ test "vector division operators" {
}
test "vector bitwise not operator" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -681,9 +677,9 @@ test "vector bitwise not operator" {
}
test "vector boolean not operator" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -705,8 +701,8 @@ test "vector boolean not operator" {
}
test "vector shift operators" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -776,8 +772,8 @@ test "vector shift operators" {
}
test "vector reduce operation" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -908,7 +904,6 @@ test "vector reduce operation" {
test "vector @reduce comptime" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -924,7 +919,7 @@ test "vector @reduce comptime" {
}
test "mask parameter of @shuffle is comptime scope" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -946,8 +941,8 @@ test "mask parameter of @shuffle is comptime scope" {
}
test "saturating add" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -980,8 +975,8 @@ test "saturating add" {
}
test "saturating subtraction" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1004,8 +999,8 @@ test "saturating subtraction" {
}
test "saturating multiplication" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1032,8 +1027,8 @@ test "saturating multiplication" {
}
test "saturating shift-left" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1056,8 +1051,8 @@ test "saturating shift-left" {
}
test "multiplication-assignment operator with an array operand" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1077,8 +1072,8 @@ test "multiplication-assignment operator with an array operand" {
}
test "@addWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1127,8 +1122,8 @@ test "@addWithOverflow" {
}
test "@subWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1161,8 +1156,8 @@ test "@subWithOverflow" {
}
test "@mulWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1184,8 +1179,8 @@ test "@mulWithOverflow" {
}
test "@shlWithOverflow" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1229,8 +1224,8 @@ test "alignment of vectors" {
}
test "loading the second vector from a slice of vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1246,8 +1241,8 @@ test "loading the second vector from a slice of vectors" {
}
test "array of vectors is copied" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -1270,8 +1265,8 @@ test "array of vectors is copied" {
}
test "byte vector initialized in inline function" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1297,7 +1292,6 @@ test "byte vector initialized in inline function" {
test "zero divisor" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1317,7 +1311,6 @@ test "zero divisor" {
test "zero multiplicand" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
@@ -1341,7 +1334,6 @@ test "zero multiplicand" {
test "@intCast to u0" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1364,8 +1356,8 @@ test "modRem with zero divisor" {
}
test "array operands to shuffle are coerced to vectors" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
@@ -1379,7 +1371,7 @@ test "array operands to shuffle are coerced to vectors" {
}
test "load packed vector element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -1391,7 +1383,7 @@ test "load packed vector element" {
}
test "store packed vector element" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -1408,7 +1400,7 @@ test "store packed vector element" {
}
test "store to vector in slice" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1426,7 +1418,7 @@ test "store to vector in slice" {
}
test "store vector with memset" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1459,7 +1451,7 @@ test "store vector with memset" {
}
test "addition of vectors represented as strings" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const V = @Vector(3, u8);
@@ -1469,7 +1461,7 @@ test "addition of vectors represented as strings" {
}
test "compare vectors with different element types" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1482,7 +1474,6 @@ test "compare vectors with different element types" {
}
test "vector pointer is indexable" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1506,7 +1497,6 @@ test "vector pointer is indexable" {
}
test "boolean vector with 2 or more booleans" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1519,7 +1509,7 @@ test "boolean vector with 2 or more booleans" {
}
test "bitcast to vector with different child type" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1551,7 +1541,6 @@ test "index into comptime-known vector is comptime-known" {
test "arithmetic on zero-length vectors" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@@ -1568,7 +1557,6 @@ test "arithmetic on zero-length vectors" {
test "@reduce on bool vector" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
const a = @Vector(2, bool){ true, true };
@@ -1578,7 +1566,7 @@ test "@reduce on bool vector" {
}
test "bitcast vector to array of smaller vectors" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior/void.zig
@@ -36,7 +36,6 @@ fn times(n: usize) []const void {
test "void optional" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: ?void = {};
test/behavior/while.zig
@@ -124,8 +124,6 @@ test "while copies its payload" {
}
test "continue and break" {
- if (builtin.zig_backend == .stage2_aarch64 and builtin.os.tag == .macos) return error.SkipZigTest;
-
try runContinueAndBreakTest();
try expect(continue_and_break_counter == 8);
}
@@ -176,6 +174,7 @@ test "while with optional as condition with else" {
}
test "while with error union condition" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -209,7 +208,6 @@ test "while on bool with else result follow break prong" {
test "while on optional with else result follow else prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = while (returnNull()) |value| {
@@ -220,7 +218,6 @@ test "while on optional with else result follow else prong" {
test "while on optional with else result follow break prong" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const result = while (returnOptional(10)) |value| {
@@ -292,7 +289,6 @@ test "while bool 2 break statements and an else" {
test "while optional 2 break statements and an else" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -310,8 +306,8 @@ test "while optional 2 break statements and an else" {
}
test "while error 2 break statements and an else" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@@ -348,8 +344,8 @@ test "else continue outer while" {
}
test "try terminating an infinite loop" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
// Test coverage for https://github.com/ziglang/zig/issues/13546
@@ -376,7 +372,6 @@ test "while loop with comptime true condition needs no else block to return valu
}
test "int returned from switch in while" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var x: u32 = 3;
@@ -389,7 +384,6 @@ test "int returned from switch in while" {
test "breaking from a loop in an if statement" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
test/behavior/widening.zig
@@ -4,7 +4,6 @@ const mem = std.mem;
const builtin = @import("builtin");
test "integer widening" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -29,7 +28,6 @@ test "integer widening u0 to u8" {
}
test "implicit unsigned integer to signed integer" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -40,7 +38,6 @@ test "implicit unsigned integer to signed integer" {
}
test "float widening" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -60,7 +57,6 @@ test "float widening" {
}
test "float widening f16 to f128" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -73,7 +69,6 @@ test "float widening f16 to f128" {
}
test "cast small unsigned to larger signed" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/c_import/macros.zig
@@ -25,7 +25,6 @@ test "casting to void with a macro" {
test "initializer list expression" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -38,7 +37,6 @@ test "initializer list expression" {
}
test "sizeof in macros" {
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -55,7 +53,6 @@ test "reference to a struct type" {
test "cast negative integer to pointer" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -64,7 +61,6 @@ test "cast negative integer to pointer" {
test "casting to union with a macro" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -80,7 +76,6 @@ test "casting to union with a macro" {
test "casting or calling a value with a paren-surrounded macro" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -99,7 +94,6 @@ test "casting or calling a value with a paren-surrounded macro" {
test "nested comma operator" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -109,7 +103,6 @@ test "nested comma operator" {
test "cast functions" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -123,7 +116,6 @@ test "cast functions" {
test "large integer macro" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -132,7 +124,6 @@ test "large integer macro" {
test "string literal macro with embedded tab character" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -141,7 +132,6 @@ test "string literal macro with embedded tab character" {
test "string and char literals that are not UTF-8 encoded. Issue #12784" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
@@ -152,7 +142,6 @@ test "string and char literals that are not UTF-8 encoded. Issue #12784" {
test "Macro that uses division operator. Issue #13162" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
@@ -196,7 +185,6 @@ test "Macro that uses division operator. Issue #13162" {
test "Macro that uses remainder operator. Issue #13346" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
test/behavior.zig
@@ -123,7 +123,6 @@ test {
}
if (builtin.zig_backend != .stage2_arm and
- builtin.zig_backend != .stage2_aarch64 and
builtin.zig_backend != .stage2_spirv)
{
_ = @import("behavior/export_keyword.zig");
@@ -141,7 +140,8 @@ test {
}
// This bug only repros in the root file
-test "deference @embedFile() of a file full of zero bytes" {
+test "dereference @embedFile() of a file full of zero bytes" {
+ if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
const contents = @embedFile("behavior/zero.bin").*;
test/tests.zig
@@ -191,6 +191,30 @@ const test_targets = blk: {
.link_libc = true,
},
+ .{
+ .target = .{
+ .cpu_arch = .aarch64,
+ .os_tag = .linux,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ .optimize_mode = .ReleaseFast,
+ .strip = true,
+ },
+ .{
+ .target = .{
+ .cpu_arch = .aarch64,
+ .cpu_model = .{ .explicit = &std.Target.aarch64.cpu.neoverse_n1 },
+ .os_tag = .linux,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ .optimize_mode = .ReleaseFast,
+ .strip = true,
+ },
+
.{
.target = .{
.cpu_arch = .aarch64_be,
@@ -1182,6 +1206,18 @@ const test_targets = blk: {
},
},
+ .{
+ .target = .{
+ .cpu_arch = .aarch64,
+ .os_tag = .macos,
+ .abi = .none,
+ },
+ .use_llvm = false,
+ .use_lld = false,
+ .optimize_mode = .ReleaseFast,
+ .strip = true,
+ },
+
.{
.target = .{
.cpu_arch = .x86_64,
@@ -2260,7 +2296,7 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
continue;
// TODO get compiler-rt tests passing for self-hosted backends.
- if ((target.cpu.arch != .x86_64 or target.ofmt != .elf) and
+ if (((target.cpu.arch != .x86_64 and target.cpu.arch != .aarch64) or target.ofmt == .coff) and
test_target.use_llvm == false and mem.eql(u8, options.name, "compiler-rt"))
continue;
CMakeLists.txt
@@ -550,6 +550,14 @@ set(ZIG_STAGE2_SOURCES
src/clang_options.zig
src/clang_options_data.zig
src/codegen.zig
+ src/codegen/aarch64.zig
+ src/codegen/aarch64/abi.zig
+ src/codegen/aarch64/Assemble.zig
+ src/codegen/aarch64/Disassemble.zig
+ src/codegen/aarch64/encoding.zig
+ src/codegen/aarch64/instructions.zon
+ src/codegen/aarch64/Mir.zig
+ src/codegen/aarch64/Select.zig
src/codegen/c.zig
src/codegen/c/Type.zig
src/codegen/llvm.zig