Commit 8470652f10
Changed files (11)
lib/compiler_rt/common.zig
@@ -82,7 +82,7 @@ pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace, _: ?
/// need for extending them to wider fp types.
/// TODO remove this; do this type selection in the language rather than
/// here in compiler-rt.
-pub fn F16T(comptime other_type: type) type {
+pub fn F16T(comptime OtherType: type) type {
return switch (builtin.cpu.arch) {
.arm, .armeb, .thumb, .thumbeb => if (std.Target.arm.featureSetHas(builtin.cpu.features, .has_v8))
switch (builtin.abi.floatAbi()) {
@@ -93,7 +93,7 @@ pub fn F16T(comptime other_type: type) type {
u16,
.aarch64, .aarch64_be, .aarch64_32 => f16,
.riscv64 => if (builtin.zig_backend == .stage1) u16 else f16,
- .x86, .x86_64 => if (builtin.target.isDarwin()) switch (other_type) {
+ .x86, .x86_64 => if (builtin.target.isDarwin()) switch (OtherType) {
// Starting with LLVM 16, Darwin uses different abi for f16
// depending on the type of the other return/argument..???
f32, f64 => u16,
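A minimal sketch (not part of this commit) of what the renamed parameter controls: on Darwin x86_64, pairing f16 with f32 or f64 selects u16 as the ABI type, per the switch above. The hypothetical comptime check below assumes it sits inside lib/compiler_rt/common.zig next to F16T.

comptime {
    // Hypothetical sanity check, only meaningful when targeting Darwin x86_64,
    // where an f16 paired with f32 is passed/returned as u16.
    if (builtin.cpu.arch == .x86_64 and builtin.target.isDarwin()) {
        if (F16T(f32) != u16) @compileError("unexpected f16 ABI type");
    }
}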
src/arch/x86_64/CodeGen.zig
@@ -2515,63 +2515,96 @@ fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.typeOf(ty_op.operand);
const src_bits = src_ty.floatBits(self.target.*);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
- const dst_reg = dst_mcv.getReg().?.to128();
- const dst_lock = self.register_manager.lockReg(dst_reg);
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const result = result: {
+ if (switch (dst_bits) {
+ 16 => switch (src_bits) {
+ 32 => !self.hasFeature(.f16c),
+ 64, 80, 128 => true,
+ else => unreachable,
+ },
+ 32 => switch (src_bits) {
+ 64 => false,
+ 80, 128 => true,
+ else => unreachable,
+ },
+ 64 => switch (src_bits) {
+ 80, 128 => true,
+ else => unreachable,
+ },
+ 80 => switch (src_bits) {
+ 128 => true,
+ else => unreachable,
+ },
+ else => unreachable,
+ }) {
+ var callee: ["__trunc?f?f2".len]u8 = undefined;
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = self.floatCompilerRtAbiType(dst_ty, src_ty).toIntern(),
+ .param_types = &.{self.floatCompilerRtAbiType(src_ty, dst_ty).toIntern()},
+ .callee = std.fmt.bufPrint(&callee, "__trunc{c}f{c}f2", .{
+ floatCompilerRtAbiName(src_bits),
+ floatCompilerRtAbiName(dst_bits),
+ }) catch unreachable,
+ } }, &.{ty_op.operand});
+ }
+
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_mcv
+ else
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
+ const dst_reg = dst_mcv.getReg().?.to128();
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
- if (dst_bits == 16 and self.hasFeature(.f16c)) {
- switch (src_bits) {
- 32 => {
- const mat_src_reg = if (src_mcv.isRegister())
+ if (dst_bits == 16) {
+ assert(self.hasFeature(.f16c));
+ switch (src_bits) {
+ 32 => {
+ const mat_src_reg = if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(src_ty, src_mcv);
+ try self.asmRegisterRegisterImmediate(
+ .{ .v_, .cvtps2ph },
+ dst_reg,
+ mat_src_reg.to128(),
+ Immediate.u(0b1_00),
+ );
+ },
+ else => unreachable,
+ }
+ } else {
+ assert(src_bits == 64 and dst_bits == 32);
+ if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
+ .{ .v_ss, .cvtsd2 },
+ dst_reg,
+ dst_reg,
+ src_mcv.mem(.qword),
+ ) else try self.asmRegisterRegisterRegister(
+ .{ .v_ss, .cvtsd2 },
+ dst_reg,
+ dst_reg,
+ (if (src_mcv.isRegister())
src_mcv.getReg().?
else
- try self.copyToTmpRegister(src_ty, src_mcv);
- try self.asmRegisterRegisterImmediate(
- .{ .v_, .cvtps2ph },
- dst_reg,
- mat_src_reg.to128(),
- Immediate.u(0b1_00),
- );
- },
- else => return self.fail("TODO implement airFptrunc from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
- }),
+ try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
+ ) else if (src_mcv.isMemory()) try self.asmRegisterMemory(
+ .{ ._ss, .cvtsd2 },
+ dst_reg,
+ src_mcv.mem(.qword),
+ ) else try self.asmRegisterRegister(
+ .{ ._ss, .cvtsd2 },
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
+ );
}
- } else if (src_bits == 64 and dst_bits == 32) {
- if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
- .{ .v_ss, .cvtsd2 },
- dst_reg,
- dst_reg,
- src_mcv.mem(.qword),
- ) else try self.asmRegisterRegisterRegister(
- .{ .v_ss, .cvtsd2 },
- dst_reg,
- dst_reg,
- (if (src_mcv.isRegister())
- src_mcv.getReg().?
- else
- try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
- ) else if (src_mcv.isMemory()) try self.asmRegisterMemory(
- .{ ._ss, .cvtsd2 },
- dst_reg,
- src_mcv.mem(.qword),
- ) else try self.asmRegisterRegister(
- .{ ._ss, .cvtsd2 },
- dst_reg,
- (if (src_mcv.isRegister())
- src_mcv.getReg().?
- else
- try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
- );
- } else return self.fail("TODO implement airFptrunc from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
- });
- return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+ break :result dst_mcv;
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
@@ -2581,58 +2614,96 @@ fn airFpext(self: *Self, inst: Air.Inst.Index) !void {
const src_ty = self.typeOf(ty_op.operand);
const src_bits = src_ty.floatBits(self.target.*);
- const src_mcv = try self.resolveInst(ty_op.operand);
- const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
- src_mcv
- else
- try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
- const dst_reg = dst_mcv.getReg().?.to128();
- const dst_lock = self.register_manager.lockReg(dst_reg);
- defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+ const result = result: {
+ if (switch (src_bits) {
+ 16 => switch (dst_bits) {
+ 32, 64 => !self.hasFeature(.f16c),
+ 80, 128 => true,
+ else => unreachable,
+ },
+ 32 => switch (dst_bits) {
+ 64 => false,
+ 80, 128 => true,
+ else => unreachable,
+ },
+ 64 => switch (dst_bits) {
+ 80, 128 => true,
+ else => unreachable,
+ },
+ 80 => switch (dst_bits) {
+ 128 => true,
+ else => unreachable,
+ },
+ else => unreachable,
+ }) {
+ var callee: ["__extend?f?f2".len]u8 = undefined;
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = self.floatCompilerRtAbiType(dst_ty, src_ty).toIntern(),
+ .param_types = &.{self.floatCompilerRtAbiType(src_ty, dst_ty).toIntern()},
+ .callee = std.fmt.bufPrint(&callee, "__extend{c}f{c}f2", .{
+ floatCompilerRtAbiName(src_bits),
+ floatCompilerRtAbiName(dst_bits),
+ }) catch unreachable,
+ } }, &.{ty_op.operand});
+ }
- if (src_bits == 16 and self.hasFeature(.f16c)) {
- const mat_src_reg = if (src_mcv.isRegister())
- src_mcv.getReg().?
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const dst_mcv = if (src_mcv.isRegister() and self.reuseOperand(inst, ty_op.operand, 0, src_mcv))
+ src_mcv
else
- try self.copyToTmpRegister(src_ty, src_mcv);
- try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, mat_src_reg.to128());
- switch (dst_bits) {
- 32 => {},
- 64 => try self.asmRegisterRegisterRegister(.{ .v_sd, .cvtss2 }, dst_reg, dst_reg, dst_reg),
- else => return self.fail("TODO implement airFpext from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
- }),
- }
- } else if (src_bits == 32 and dst_bits == 64) {
- if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
- .{ .v_sd, .cvtss2 },
- dst_reg,
- dst_reg,
- src_mcv.mem(.dword),
- ) else try self.asmRegisterRegisterRegister(
- .{ .v_sd, .cvtss2 },
- dst_reg,
- dst_reg,
- (if (src_mcv.isRegister())
- src_mcv.getReg().?
- else
- try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
- ) else if (src_mcv.isMemory()) try self.asmRegisterMemory(
- .{ ._sd, .cvtss2 },
- dst_reg,
- src_mcv.mem(.dword),
- ) else try self.asmRegisterRegister(
- .{ ._sd, .cvtss2 },
- dst_reg,
- (if (src_mcv.isRegister())
+ try self.copyToRegisterWithInstTracking(inst, dst_ty, src_mcv);
+ const dst_reg = dst_mcv.getReg().?.to128();
+ const dst_lock = self.register_manager.lockReg(dst_reg);
+ defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
+
+ if (src_bits == 16) {
+ assert(self.hasFeature(.f16c));
+ const mat_src_reg = if (src_mcv.isRegister())
src_mcv.getReg().?
else
- try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
- );
- } else return self.fail("TODO implement airFpext from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
- });
- return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+ try self.copyToTmpRegister(src_ty, src_mcv);
+ try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, dst_reg, mat_src_reg.to128());
+ switch (dst_bits) {
+ 32 => {},
+ 64 => try self.asmRegisterRegisterRegister(
+ .{ .v_sd, .cvtss2 },
+ dst_reg,
+ dst_reg,
+ dst_reg,
+ ),
+ else => unreachable,
+ }
+ } else {
+ assert(src_bits == 32 and dst_bits == 64);
+ if (self.hasFeature(.avx)) if (src_mcv.isMemory()) try self.asmRegisterRegisterMemory(
+ .{ .v_sd, .cvtss2 },
+ dst_reg,
+ dst_reg,
+ src_mcv.mem(.dword),
+ ) else try self.asmRegisterRegisterRegister(
+ .{ .v_sd, .cvtss2 },
+ dst_reg,
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
+ ) else if (src_mcv.isMemory()) try self.asmRegisterMemory(
+ .{ ._sd, .cvtss2 },
+ dst_reg,
+ src_mcv.mem(.dword),
+ ) else try self.asmRegisterRegister(
+ .{ ._sd, .cvtss2 },
+ dst_reg,
+ (if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(src_ty, src_mcv)).to128(),
+ );
+ }
+ break :result dst_mcv;
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
@@ -8358,26 +8429,64 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ty = self.typeOf(bin_op.lhs);
- try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
+ const result: Condition = result: {
+ switch (ty.zigTypeTag(mod)) {
+ .Float => {
+ const float_bits = ty.floatBits(self.target.*);
+ if (switch (float_bits) {
+ 16 => !self.hasFeature(.f16c),
+ 32, 64 => false,
+ 80, 128 => true,
+ else => unreachable,
+ }) {
+ var callee: ["__???f2".len]u8 = undefined;
+ const ret = try self.genCall(.{ .lib = .{
+ .return_type = .i32_type,
+ .param_types = &.{ ty.toIntern(), ty.toIntern() },
+ .callee = std.fmt.bufPrint(&callee, "__{s}{c}f2", .{
+ switch (op) {
+ .eq => "eq",
+ .neq => "ne",
+ .lt => "lt",
+ .lte => "le",
+ .gt => "gt",
+ .gte => "ge",
+ },
+ floatCompilerRtAbiName(float_bits),
+ }) catch unreachable,
+ } }, &.{ bin_op.lhs, bin_op.rhs });
+ try self.genBinOpMir(.{ ._, .@"test" }, Type.i32, ret, ret);
+ break :result switch (op) {
+ .eq => .e,
+ .neq => .ne,
+ .lt => .l,
+ .lte => .le,
+ .gt => .g,
+ .gte => .ge,
+ };
+ }
+ },
+ else => {},
+ }
- const lhs_mcv = try self.resolveInst(bin_op.lhs);
- const lhs_lock = switch (lhs_mcv) {
- .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
- else => null,
- };
- defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
+ try self.spillEflagsIfOccupied();
- const rhs_mcv = try self.resolveInst(bin_op.rhs);
- const rhs_lock = switch (rhs_mcv) {
- .register => |reg| self.register_manager.lockReg(reg),
- else => null,
- };
- defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+ const lhs_mcv = try self.resolveInst(bin_op.lhs);
+ const lhs_lock = switch (lhs_mcv) {
+ .register => |reg| self.register_manager.lockRegAssumeUnused(reg),
+ else => null,
+ };
+ defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
- const result = MCValue{
- .eflags = switch (ty.zigTypeTag(mod)) {
- else => result: {
+ const rhs_mcv = try self.resolveInst(bin_op.rhs);
+ const rhs_lock = switch (rhs_mcv) {
+ .register => |reg| self.register_manager.lockReg(reg),
+ else => null,
+ };
+ defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
+
+ switch (ty.zigTypeTag(mod)) {
+ else => {
const abi_size: u16 = @intCast(ty.abiSize(mod));
const may_flip: enum {
may_flip,
@@ -8479,7 +8588,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
},
);
},
- .Float => result: {
+ .Float => {
const flipped = switch (op) {
.lt, .lte => true,
.eq, .gte, .gt, .neq => false,
@@ -8495,7 +8604,8 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const src_mcv = if (flipped) lhs_mcv else rhs_mcv;
switch (ty.floatBits(self.target.*)) {
- 16 => if (self.hasFeature(.f16c)) {
+ 16 => {
+ assert(self.hasFeature(.f16c));
const tmp1_reg = (try self.register_manager.allocReg(null, sse)).to128();
const tmp1_mcv = MCValue{ .register = tmp1_reg };
const tmp1_lock = self.register_manager.lockRegAssumeUnused(tmp1_reg);
@@ -8524,9 +8634,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
try self.asmRegisterRegister(.{ .v_ps, .cvtph2 }, tmp1_reg, tmp1_reg);
try self.asmRegisterRegister(.{ .v_, .movshdup }, tmp2_reg, tmp1_reg);
try self.genBinOpMir(.{ ._ss, .ucomi }, ty, tmp1_mcv, tmp2_mcv);
- } else return self.fail("TODO implement airCmp for {}", .{
- ty.fmt(mod),
- }),
+ },
32 => try self.genBinOpMir(
.{ ._ss, .ucomi },
ty,
@@ -8539,9 +8647,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.{ .register = dst_reg },
src_mcv,
),
- else => return self.fail("TODO implement airCmp for {}", .{
- ty.fmt(mod),
- }),
+ else => unreachable,
}
break :result switch (if (flipped) op.reverse() else op) {
@@ -8552,9 +8658,11 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
.neq => .nz_or_p,
};
},
- },
+ }
};
- return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
+
+ self.eflags_inst = inst;
+ return self.finishAir(inst, .{ .eflags = result }, .{ bin_op.lhs, bin_op.rhs, .none });
}
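The condition codes chosen after the __{op}?f2 libcall above rely on the usual compiler-rt/libgcc soft-float comparison convention: each routine returns an i32 whose relation to zero mirrors the requested ordering, so a plain test followed by a signed condition is sufficient. A minimal sketch under that assumption, using the real compiler-rt symbol __lttf2 for f128:

// __eq?f2 returns 0 only when the operands compare equal, __lt?f2 returns a
// negative value only when lhs < rhs, __le?f2 a non-positive value only when
// lhs <= rhs, and so on; NaN operands land on the "false" side in every case.
extern fn __lttf2(a: f128, b: f128) i32;

fn f128LessThan(a: f128, b: f128) bool {
    // Mirrors the generated sequence: call __lttf2, test the result, then
    // branch or set on the signed `l` condition.
    return __lttf2(a, b) < 0;
}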
fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
@@ -8572,7 +8680,6 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
try self.genLazySymbolRef(.lea, addr_reg, link.File.LazySymbol.initDecl(.const_data, null, mod));
try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
const op_ty = self.typeOf(un_op);
const op_abi_size: u32 = @intCast(op_ty.abiSize(mod));
@@ -8586,8 +8693,9 @@ fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
registerAlias(dst_reg, op_abi_size),
Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = .{ .reg = addr_reg } }),
);
- const result = MCValue{ .eflags = .b };
- return self.finishAir(inst, result, .{ un_op, .none, .none });
+
+ self.eflags_inst = inst;
+ return self.finishAir(inst, .{ .eflags = .b }, .{ un_op, .none, .none });
}
fn airTry(self: *Self, inst: Air.Inst.Index) !void {
@@ -8777,7 +8885,6 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
}
try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
const pl_ty = opt_ty.optionalChild(mod);
@@ -8786,6 +8893,7 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
else
.{ .off = @intCast(pl_ty.abiSize(mod)), .ty = Type.bool };
+ self.eflags_inst = inst;
switch (opt_mcv) {
.none,
.unreach,
@@ -8867,7 +8975,6 @@ fn isNull(self: *Self, inst: Air.Inst.Index, opt_ty: Type, opt_mcv: MCValue) !MC
fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue) !MCValue {
const mod = self.bin_file.options.module.?;
try self.spillEflagsIfOccupied();
- self.eflags_inst = inst;
const opt_ty = ptr_ty.childType(mod);
const pl_ty = opt_ty.optionalChild(mod);
@@ -8893,6 +9000,8 @@ fn isNullPtr(self: *Self, inst: Air.Inst.Index, ptr_ty: Type, ptr_mcv: MCValue)
}),
Immediate.u(0),
);
+
+ self.eflags_inst = inst;
return .{ .eflags = .e };
}
@@ -8905,9 +9014,6 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
}
try self.spillEflagsIfOccupied();
- if (maybe_inst) |inst| {
- self.eflags_inst = inst;
- }
const err_off = errUnionErrorOffset(ty.errorUnionPayload(mod), mod);
switch (operand) {
@@ -8945,6 +9051,7 @@ fn isErr(self: *Self, maybe_inst: ?Air.Inst.Index, ty: Type, operand: MCValue) !
else => return self.fail("TODO implement isErr for {}", .{operand}),
}
+ if (maybe_inst) |inst| self.eflags_inst = inst;
return MCValue{ .eflags = .a };
}
@@ -10526,106 +10633,150 @@ fn airFloatFromInt(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
+ const dst_ty = self.typeOfIndex(inst);
+ const dst_bits = dst_ty.floatBits(self.target.*);
+
const src_ty = self.typeOf(ty_op.operand);
const src_bits: u32 = @intCast(src_ty.bitSize(mod));
const src_signedness =
if (src_ty.isAbiInt(mod)) src_ty.intInfo(mod).signedness else .unsigned;
- const dst_ty = self.typeOfIndex(inst);
-
const src_size = math.divCeil(u32, @max(switch (src_signedness) {
.signed => src_bits,
.unsigned => src_bits + 1,
}, 32), 8) catch unreachable;
- if (src_size > 8) return self.fail("TODO implement airFloatFromInt from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
- });
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = if (src_mcv.isRegister())
- src_mcv.getReg().?
- else
- try self.copyToTmpRegister(src_ty, src_mcv);
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const result = result: {
+ if (switch (dst_bits) {
+ 16, 80, 128 => true,
+ 32, 64 => src_size > 8 and src_size < 16,
+ else => unreachable,
+ }) {
+ var callee: ["__floatun?i?f".len]u8 = undefined;
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = dst_ty.toIntern(),
+ .param_types = &.{src_ty.toIntern()},
+ .callee = std.fmt.bufPrint(&callee, "__float{s}{c}i{c}f", .{
+ switch (src_signedness) {
+ .signed => "",
+ .unsigned => "un",
+ },
+ intCompilerRtAbiName(src_bits),
+ floatCompilerRtAbiName(dst_bits),
+ }) catch unreachable,
+ } }, &.{ty_op.operand});
+ }
- if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg);
+ if (src_size > 8) return self.fail("TODO implement airFloatFromInt from {} to {}", .{
+ src_ty.fmt(mod), dst_ty.fmt(mod),
+ });
- const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
- const dst_mcv = MCValue{ .register = dst_reg };
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(src_ty, src_mcv);
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
- const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) {
- .Float => switch (dst_ty.floatBits(self.target.*)) {
- 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
- 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
- 16, 80, 128 => null,
- else => unreachable,
- },
- else => null,
- }) orelse return self.fail("TODO implement airFloatFromInt from {} to {}", .{
- src_ty.fmt(mod), dst_ty.fmt(mod),
- });
- const dst_alias = dst_reg.to128();
- const src_alias = registerAlias(src_reg, src_size);
- switch (mir_tag[0]) {
- .v_ss, .v_sd => try self.asmRegisterRegisterRegister(mir_tag, dst_alias, dst_alias, src_alias),
- else => try self.asmRegisterRegister(mir_tag, dst_alias, src_alias),
- }
+ if (src_bits < src_size * 8) try self.truncateRegister(src_ty, src_reg);
- return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+ const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
+
+ const mir_tag = @as(?Mir.Inst.FixedTag, switch (dst_ty.zigTypeTag(mod)) {
+ .Float => switch (dst_ty.floatBits(self.target.*)) {
+ 32 => if (self.hasFeature(.avx)) .{ .v_ss, .cvtsi2 } else .{ ._ss, .cvtsi2 },
+ 64 => if (self.hasFeature(.avx)) .{ .v_sd, .cvtsi2 } else .{ ._sd, .cvtsi2 },
+ 16, 80, 128 => null,
+ else => unreachable,
+ },
+ else => null,
+ }) orelse return self.fail("TODO implement airFloatFromInt from {} to {}", .{
+ src_ty.fmt(mod), dst_ty.fmt(mod),
+ });
+ const dst_alias = dst_reg.to128();
+ const src_alias = registerAlias(src_reg, src_size);
+ switch (mir_tag[0]) {
+ .v_ss, .v_sd => try self.asmRegisterRegisterRegister(mir_tag, dst_alias, dst_alias, src_alias),
+ else => try self.asmRegisterRegister(mir_tag, dst_alias, src_alias),
+ }
+
+ break :result dst_mcv;
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airIntFromFloat(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
- const src_ty = self.typeOf(ty_op.operand);
const dst_ty = self.typeOfIndex(inst);
const dst_bits: u32 = @intCast(dst_ty.bitSize(mod));
const dst_signedness =
if (dst_ty.isAbiInt(mod)) dst_ty.intInfo(mod).signedness else .unsigned;
-
const dst_size = math.divCeil(u32, @max(switch (dst_signedness) {
.signed => dst_bits,
.unsigned => dst_bits + 1,
}, 32), 8) catch unreachable;
- if (dst_size > 8) return self.fail("TODO implement airIntFromFloat from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
- });
- const src_mcv = try self.resolveInst(ty_op.operand);
- const src_reg = if (src_mcv.isRegister())
- src_mcv.getReg().?
- else
- try self.copyToTmpRegister(src_ty, src_mcv);
- const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
- defer self.register_manager.unlockReg(src_lock);
+ const src_ty = self.typeOf(ty_op.operand);
+ const src_bits = src_ty.floatBits(self.target.*);
- const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
- const dst_mcv = MCValue{ .register = dst_reg };
- const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
- defer self.register_manager.unlockReg(dst_lock);
+ const result = result: {
+ if (switch (src_bits) {
+ 16, 80, 128 => true,
+ 32, 64 => dst_size > 8 and dst_size < 16,
+ else => unreachable,
+ }) {
+ var callee: ["__fixuns?f?i".len]u8 = undefined;
+ break :result try self.genCall(.{ .lib = .{
+ .return_type = dst_ty.toIntern(),
+ .param_types = &.{src_ty.toIntern()},
+ .callee = std.fmt.bufPrint(&callee, "__fix{s}{c}f{c}i", .{
+ switch (dst_signedness) {
+ .signed => "",
+ .unsigned => "uns",
+ },
+ floatCompilerRtAbiName(src_bits),
+ intCompilerRtAbiName(dst_bits),
+ }) catch unreachable,
+ } }, &.{ty_op.operand});
+ }
- try self.asmRegisterRegister(
- @as(?Mir.Inst.FixedTag, switch (src_ty.zigTypeTag(mod)) {
- .Float => switch (src_ty.floatBits(self.target.*)) {
+ if (dst_size > 8) return self.fail("TODO implement airIntFromFloat from {} to {}", .{
+ src_ty.fmt(mod), dst_ty.fmt(mod),
+ });
+
+ const src_mcv = try self.resolveInst(ty_op.operand);
+ const src_reg = if (src_mcv.isRegister())
+ src_mcv.getReg().?
+ else
+ try self.copyToTmpRegister(src_ty, src_mcv);
+ const src_lock = self.register_manager.lockRegAssumeUnused(src_reg);
+ defer self.register_manager.unlockReg(src_lock);
+
+ const dst_reg = try self.register_manager.allocReg(inst, regClassForType(dst_ty, mod));
+ const dst_mcv = MCValue{ .register = dst_reg };
+ const dst_lock = self.register_manager.lockRegAssumeUnused(dst_reg);
+ defer self.register_manager.unlockReg(dst_lock);
+
+ try self.asmRegisterRegister(
+ switch (src_bits) {
32 => if (self.hasFeature(.avx)) .{ .v_, .cvttss2si } else .{ ._, .cvttss2si },
64 => if (self.hasFeature(.avx)) .{ .v_, .cvttsd2si } else .{ ._, .cvttsd2si },
- 16, 80, 128 => null,
else => unreachable,
},
- else => null,
- }) orelse return self.fail("TODO implement airIntFromFloat from {} to {}", .{
- src_ty.fmt(self.bin_file.options.module.?), dst_ty.fmt(self.bin_file.options.module.?),
- }),
- registerAlias(dst_reg, dst_size),
- src_reg.to128(),
- );
+ registerAlias(dst_reg, dst_size),
+ src_reg.to128(),
+ );
- if (dst_bits < dst_size * 8) try self.truncateRegister(dst_ty, dst_reg);
+ if (dst_bits < dst_size * 8) try self.truncateRegister(dst_ty, dst_reg);
- return self.finishAir(inst, dst_mcv, .{ ty_op.operand, .none, .none });
+ break :result dst_mcv;
+ };
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
}
fn airCmpxchg(self: *Self, inst: Air.Inst.Index) !void {
@@ -12276,3 +12427,30 @@ fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
const mod = self.bin_file.options.module.?;
return self.air.typeOfIndex(inst, &mod.intern_pool);
}
+
+fn intCompilerRtAbiName(int_bits: u32) u8 {
+ return switch (int_bits) {
+ 1...32 => 's',
+ 33...64 => 'd',
+ 65...128 => 't',
+ else => unreachable,
+ };
+}
+
+fn floatCompilerRtAbiName(float_bits: u32) u8 {
+ return switch (float_bits) {
+ 16 => 'h',
+ 32 => 's',
+ 64 => 'd',
+ 80 => 'x',
+ 128 => 't',
+ else => unreachable,
+ };
+}
+
+fn floatCompilerRtAbiType(self: *Self, ty: Type, other_ty: Type) Type {
+ if (ty.toIntern() == .f16_type and
+ (other_ty.toIntern() == .f32_type or other_ty.toIntern() == .f64_type) and
+ self.target.isDarwin()) return Type.u16;
+ return ty;
+}
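For illustration only, a small sketch of how the two name helpers compose the compiler-rt callee strings used by airFptrunc/airFpext and airFloatFromInt/airIntFromFloat above; it assumes floatCompilerRtAbiName and intCompilerRtAbiName are in scope as defined in this diff.

const std = @import("std");

test "compiler-rt callee name construction" {
    var trunc_buf: ["__trunc?f?f2".len]u8 = undefined;
    // f64 -> f16 truncation without hardware support calls __truncdfhf2.
    try std.testing.expectEqualStrings("__truncdfhf2", try std.fmt.bufPrint(
        &trunc_buf,
        "__trunc{c}f{c}f2",
        .{ floatCompilerRtAbiName(64), floatCompilerRtAbiName(16) },
    ));

    var fix_buf: ["__fixuns?f?i".len]u8 = undefined;
    // f32 -> u64 conversion lowers to __fixunssfdi.
    try std.testing.expectEqualStrings("__fixunssfdi", try std.fmt.bufPrint(
        &fix_buf,
        "__fix{s}{c}f{c}i",
        .{ "uns", floatCompilerRtAbiName(32), intCompilerRtAbiName(64) },
    ));
}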
test/behavior/bugs/12680.zig
@@ -9,9 +9,10 @@ test "export a function twice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
+
if (builtin.os.tag == .macos and builtin.zig_backend == .stage2_c) {
// TODO: test.c: error: aliases are not supported on darwin
return error.SkipZigTest;
test/behavior/bugs/529.zig
@@ -11,11 +11,11 @@ comptime {
const builtin = @import("builtin");
test "issue 529 fixed" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
@import("529_other_file.zig").issue529(null);
issue529(null);
test/behavior/asm.zig
@@ -85,7 +85,6 @@ test "alternative constraints" {
test "sized integer/float in asm input" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -137,7 +136,6 @@ test "sized integer/float in asm input" {
test "struct/array/union types as input values" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/bitcast.zig
@@ -470,7 +470,6 @@ test "@bitCast of packed struct of bools all true" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -492,7 +491,6 @@ test "@bitCast of packed struct of bools all false" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
test/behavior/cast.zig
@@ -1611,7 +1611,6 @@ test "coercion from single-item pointer to @as to slice" {
test "peer type resolution: const sentinel slice and mutable non-sentinel slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1664,7 +1663,6 @@ test "peer type resolution: float and comptime-known fixed-width integer" {
test "peer type resolution: same array type with sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1687,7 +1685,6 @@ test "peer type resolution: same array type with sentinel" {
test "peer type resolution: array with sentinel and array without sentinel" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1710,7 +1707,6 @@ test "peer type resolution: array with sentinel and array without sentinel" {
test "peer type resolution: array and vector with same child type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1756,7 +1752,6 @@ test "peer type resolution: array with smaller child type and vector with larger
test "peer type resolution: error union and optional of same type" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1780,7 +1775,6 @@ test "peer type resolution: error union and optional of same type" {
test "peer type resolution: C pointer and @TypeOf(null)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1803,7 +1797,6 @@ test "peer type resolution: C pointer and @TypeOf(null)" {
test "peer type resolution: three-way resolution combines error set and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1846,7 +1839,6 @@ test "peer type resolution: three-way resolution combines error set and optional
test "peer type resolution: vector and optional vector" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1869,7 +1861,6 @@ test "peer type resolution: vector and optional vector" {
test "peer type resolution: optional fixed-width int and comptime_int" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1892,7 +1883,6 @@ test "peer type resolution: optional fixed-width int and comptime_int" {
test "peer type resolution: array and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1916,7 +1906,6 @@ test "peer type resolution: array and tuple" {
test "peer type resolution: vector and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1940,7 +1929,6 @@ test "peer type resolution: vector and tuple" {
test "peer type resolution: vector and array and tuple" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -1983,7 +1971,6 @@ test "peer type resolution: vector and array and tuple" {
test "peer type resolution: empty tuple pointer and slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2005,7 +1992,6 @@ test "peer type resolution: empty tuple pointer and slice" {
test "peer type resolution: tuple pointer and slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2027,7 +2013,6 @@ test "peer type resolution: tuple pointer and slice" {
test "peer type resolution: tuple pointer and optional slice" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2049,7 +2034,6 @@ test "peer type resolution: tuple pointer and optional slice" {
test "peer type resolution: many compatible pointers" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2116,7 +2100,6 @@ test "peer type resolution: many compatible pointers" {
test "peer type resolution: tuples with comptime fields" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2149,7 +2132,6 @@ test "peer type resolution: tuples with comptime fields" {
test "peer type resolution: C pointer and many pointer" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2173,7 +2155,6 @@ test "peer type resolution: C pointer and many pointer" {
test "peer type resolution: pointer attributes are combined correctly" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2218,7 +2199,6 @@ test "peer type resolution: pointer attributes are combined correctly" {
test "cast builtins can wrap result in optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2258,7 +2238,6 @@ test "cast builtins can wrap result in optional" {
test "cast builtins can wrap result in error union" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -2298,7 +2277,6 @@ test "cast builtins can wrap result in error union" {
test "cast builtins can wrap result in error union and optional" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // TODO
@@ -2496,7 +2474,6 @@ test "@as does not corrupt values with incompatible representations" {
test "result information is preserved through many nested structures" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/defer.zig
@@ -133,7 +133,6 @@ test "errdefer with payload" {
}
test "reference to errdefer payload" {
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
test/behavior/floatop.zig
@@ -23,7 +23,6 @@ test "cmp f16" {
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
- if (no_x86_64_hardware_f16_support) return error.SkipZigTest;
try testCmp(f16);
try comptime testCmp(f16);
@@ -115,7 +114,7 @@ test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
- if (no_x86_64_hardware_f16_support) return error.SkipZigTest;
+ if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf) return error.SkipZigTest;
try testDifferentSizedFloatComparisons();
try comptime testDifferentSizedFloatComparisons();
test/behavior/struct.zig
@@ -1651,7 +1651,6 @@ test "instantiate struct with comptime field" {
test "struct field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1684,7 +1683,6 @@ test "struct field pointer has correct alignment" {
test "extern struct field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
test/behavior/union.zig
@@ -1534,7 +1534,6 @@ test "coerce enum literal to union in result loc" {
test "defined-layout union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1572,7 +1571,6 @@ test "defined-layout union field pointer has correct alignment" {
test "undefined-layout union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -1610,7 +1608,6 @@ test "undefined-layout union field pointer has correct alignment" {
test "packed union field pointer has correct alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO