Commit cc6694a323
Changed files (7)
src/arch/x86_64/CodeGen.zig
@@ -12566,7 +12566,7 @@ fn resolveCallingConventionValues(
abi.getCAbiIntReturnRegs(resolved_cc)[0..classes.len],
0..,
) |class, ret_reg, ret_reg_i| {
- result.return_value = switch (classes[0]) {
+ result.return_value = switch (class) {
.integer => switch (ret_reg_i) {
0 => InstTracking.init(.{ .register = registerAlias(
ret_reg,
@@ -12582,6 +12582,7 @@ fn resolveCallingConventionValues(
0 => InstTracking.init(.{ .register = .xmm0 }),
else => return self.fail("TODO handle multiple classes per type", .{}),
},
+ .sseup => continue,
.memory => switch (ret_reg_i) {
0 => ret: {
const ret_indirect_reg =
@@ -12602,7 +12603,7 @@ fn resolveCallingConventionValues(
}
// Input params
- next_param: for (param_types, result.args) |ty, *arg| {
+ for (param_types, result.args) |ty, *arg| {
assert(ty.hasRuntimeBitsIgnoreComptime(mod));
const classes = switch (self.target.os.tag) {
@@ -12620,7 +12621,6 @@ fn resolveCallingConventionValues(
1 => .{ .register_pair = .{ arg.register, param_reg } },
else => return self.fail("TODO handle multiple classes per type", .{}),
};
- continue;
} else break,
.float, .sse => switch (self.target.os.tag) {
.windows => if (param_reg_i < 4) {
@@ -12630,7 +12630,6 @@ fn resolveCallingConventionValues(
.register = @enumFromInt(@intFromEnum(Register.xmm0) + param_reg_i),
};
param_reg_i += 1;
- continue;
} else break,
else => if (param_sse_reg_i < 8) {
if (class_i > 0)
@@ -12639,15 +12638,15 @@ fn resolveCallingConventionValues(
@intFromEnum(Register.xmm0) + param_sse_reg_i,
) };
param_sse_reg_i += 1;
- continue;
} else break,
},
+ .sseup => {},
.memory => break,
else => return self.fail("TODO handle calling convention class {s}", .{
@tagName(class),
}),
}
- } else continue :next_param;
+ } else continue;
const param_size: u31 = @intCast(ty.abiSize(mod));
const param_align: u31 = @intCast(ty.abiAlignment(mod).toByteUnitsOptional().?);
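Note (not part of the commit): the .sseup handling above follows the SysV x86-64 classification rule that a 16-byte SSE value such as f128 occupies two eightbytes classified .sse then .sseup, and the .sseup eightbyte travels in the same XMM register as its .sse eightbyte, so it needs no register assignment of its own. A minimal, hypothetical Zig sketch of that rule, with illustrative names rather than CodeGen.zig's real types:

const std = @import("std");

// Simplified eightbyte classes; illustrative only, not the compiler's API.
const Class = enum { integer, sse, sseup, memory };

// Under the SysV x86-64 ABI, f128 classifies as one .sse eightbyte followed
// by one .sseup eightbyte; the .sseup half shares the XMM register already
// assigned to the .sse half, which is why the resolver can skip it
// (`continue` for return values, `{}` for parameters) instead of consuming
// another register.
fn classifyF128() [2]Class {
    return .{ .sse, .sseup };
}

test "f128 classifies as sse + sseup" {
    const classes = classifyF128();
    try std.testing.expectEqual(Class.sse, classes[0]);
    try std.testing.expectEqual(Class.sseup, classes[1]);
}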
test/behavior/abs.zig
@@ -97,7 +97,7 @@ test "@abs floats" {
try comptime testAbsFloats(f80);
if (builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_wasm) try testAbsFloats(f80);
try comptime testAbsFloats(f128);
- if (builtin.zig_backend != .stage2_x86_64 and builtin.zig_backend != .stage2_wasm) try testAbsFloats(f128);
+ if (builtin.zig_backend != .stage2_wasm) try testAbsFloats(f128);
}
fn testAbsFloats(comptime T: type) !void {
test/behavior/bitcast.zig
@@ -297,11 +297,11 @@ test "triple level result location with bitcast sandwich passed as tuple element
test "@bitCast packed struct of floats" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
const Foo = packed struct {
a: f16 = 0,
test/behavior/cast.zig
@@ -1369,10 +1369,10 @@ fn boolToStr(b: bool) []const u8 {
test "cast f16 to wider types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
@@ -1389,9 +1389,9 @@ test "cast f16 to wider types" {
test "cast f128 to narrower types" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
const S = struct {
fn doTheTest() !void {
test/behavior/floatop.zig
@@ -130,7 +130,7 @@ test "cmp f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c and builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
try testCmp(f128);
try comptime testCmp(f128);
test/behavior/math.zig
@@ -637,11 +637,11 @@ fn testShrTrunc(x: u16) !void {
}
test "f128" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
try test_f128();
try comptime test_f128();
@@ -1481,7 +1481,6 @@ test "@round f80" {
test "@round f128" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c and comptime builtin.cpu.arch.isArmOrThumb()) return error.SkipZigTest;
@@ -1522,9 +1521,9 @@ test "vector integer addition" {
test "NaN comparison" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
try testNanEqNan(f16);
try testNanEqNan(f32);
test/behavior/widening.zig
@@ -60,11 +60,11 @@ test "float widening" {
}
test "float widening f16 to f128" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
var x: f16 = 12.34;
var y: f128 = x;
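Note (not part of the commit): with the backend change in place, these tests now skip the self-hosted x86-64 backend only for non-ELF object formats. A hypothetical companion test in the same style, showing the kind of f128 return-value path the new classification enables across a call boundary:

const std = @import("std");
const builtin = @import("builtin");

// Hypothetical sketch, not part of the commit: returning an f128 from a
// function exercises the .sse + .sseup return classification (the value
// comes back in xmm0 under the SysV x86-64 calling convention).
fn widen(x: f16) f128 {
    return x;
}

test "f128 return value crosses a call boundary" {
    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
    var x: f16 = 12.34;
    _ = &x; // keep x runtime-known
    const y = widen(x);
    try std.testing.expect(y == x); // widening f16 -> f128 is exact
}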