Commit c99c085d70
Changed files (127)
lib
compiler_rt
lib/compiler_rt/absv.zig
@@ -1,18 +1,6 @@
-// absv - absolute oVerflow
-// * @panic, if value can not be represented
-// - absvXi4_generic for unoptimized version
-const std = @import("std");
-const builtin = @import("builtin");
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__absvsi2, .{ .name = "__absvsi2", .linkage = linkage });
- @export(__absvdi2, .{ .name = "__absvdi2", .linkage = linkage });
- @export(__absvti2, .{ .name = "__absvti2", .linkage = linkage });
-}
-
-inline fn absvXi(comptime ST: type, a: ST) ST {
+/// absv - absolute oVerflow
+/// * @panic if value can not be represented
+pub inline fn absv(comptime ST: type, a: ST) ST {
const UT = switch (ST) {
i32 => u32,
i64 => u64,
@@ -31,18 +19,6 @@ inline fn absvXi(comptime ST: type, a: ST) ST {
return x;
}
-pub fn __absvsi2(a: i32) callconv(.C) i32 {
- return absvXi(i32, a);
-}
-
-pub fn __absvdi2(a: i64) callconv(.C) i64 {
- return absvXi(i64, a);
-}
-
-pub fn __absvti2(a: i128) callconv(.C) i128 {
- return absvXi(i128, a);
-}
-
test {
_ = @import("absvsi2_test.zig");
_ = @import("absvdi2_test.zig");
lib/compiler_rt/absvdi2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvdi2, .{ .name = "__absvdi2", .linkage = common.linkage });
+}
+
+fn __absvdi2(a: i64) callconv(.C) i64 {
+ return absv(i64, a);
+}
lib/compiler_rt/absvsi2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvsi2, .{ .name = "__absvsi2", .linkage = common.linkage });
+}
+
+fn __absvsi2(a: i32) callconv(.C) i32 {
+ return absv(i32, a);
+}
lib/compiler_rt/absvti2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const absv = @import("./absv.zig").absv;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__absvti2, .{ .name = "__absvti2", .linkage = common.linkage });
+}
+
+fn __absvti2(a: i128) callconv(.C) i128 {
+ return absv(i128, a);
+}
lib/compiler_rt/adddf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = common.linkage });
+ } else {
+ @export(__adddf3, .{ .name = "__adddf3", .linkage = common.linkage });
+ }
+}
+
+fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
+ return addf3(f64, a, b);
+}
+
+fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return addf3(f64, a, b);
+}
lib/compiler_rt/addXf3.zig → lib/compiler_rt/addf3.zig
@@ -1,111 +1,12 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
-
const std = @import("std");
-const builtin = @import("builtin");
const math = std.math;
-const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-
-const common = @import("common.zig");
+const common = @import("./common.zig");
const normalize = common.normalize;
-pub const panic = common.panic;
-
-comptime {
- @export(__addsf3, .{ .name = "__addsf3", .linkage = linkage });
- @export(__adddf3, .{ .name = "__adddf3", .linkage = linkage });
- @export(__addxf3, .{ .name = "__addxf3", .linkage = linkage });
- @export(__addtf3, .{ .name = "__addtf3", .linkage = linkage });
-
- @export(__subsf3, .{ .name = "__subsf3", .linkage = linkage });
- @export(__subdf3, .{ .name = "__subdf3", .linkage = linkage });
- @export(__subxf3, .{ .name = "__subxf3", .linkage = linkage });
- @export(__subtf3, .{ .name = "__subtf3", .linkage = linkage });
-
- if (!is_test) {
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = linkage });
- @export(__aeabi_dadd, .{ .name = "__aeabi_dadd", .linkage = linkage });
- @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = linkage });
- @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__addkf3, .{ .name = "__addkf3", .linkage = linkage });
- @export(__subkf3, .{ .name = "__subkf3", .linkage = linkage });
- }
- }
-}
-
-pub fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
- return addXf3(f32, a, b);
-}
-
-pub fn __adddf3(a: f64, b: f64) callconv(.C) f64 {
- return addXf3(f64, a, b);
-}
-
-pub fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
- return addXf3(f80, a, b);
-}
-
-pub fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
- var b_rep = std.math.break_f80(b);
- b_rep.exp ^= 0x8000;
- return __addxf3(a, std.math.make_f80(b_rep));
-}
-
-pub fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
- return addXf3(f128, a, b);
-}
-
-pub fn __addkf3(a: f128, b: f128) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __addtf3, .{ a, b });
-}
-
-pub fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
- const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
- return addXf3(f32, a, neg_b);
-}
-
-pub fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
- const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
- return addXf3(f64, a, neg_b);
-}
-
-pub fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
- const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
- return addXf3(f128, a, neg_b);
-}
-
-pub fn __subkf3(a: f128, b: f128) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __subtf3, .{ a, b });
-}
-
-pub fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __addsf3, .{ a, b });
-}
-
-pub fn __aeabi_dadd(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __adddf3, .{ a, b });
-}
-
-pub fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __subsf3, .{ a, b });
-}
-
-pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __subdf3, .{ a, b });
-}
-// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
-pub fn addXf3(comptime T: type, a: T, b: T) T {
+/// Ported from:
+///
+/// https://github.com/llvm/llvm-project/blob/02d85149a05cb1f6dc49f0ba7a2ceca53718ae17/compiler-rt/lib/builtins/fp_add_impl.inc
+pub inline fn addf3(comptime T: type, a: T, b: T) T {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(.unsigned, bits);
const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
@@ -267,5 +168,5 @@ pub fn addXf3(comptime T: type, a: T, b: T) T {
}
test {
- _ = @import("addXf3_test.zig");
+ _ = @import("addf3_test.zig");
}
lib/compiler_rt/addXf3_test.zig → lib/compiler_rt/addf3_test.zig
@@ -7,7 +7,7 @@ const std = @import("std");
const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
-const __addtf3 = @import("addXf3.zig").__addtf3;
+const __addtf3 = @import("addf3.zig").__addtf3;
fn test__addtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
const x = __addtf3(a, b);
@@ -48,7 +48,7 @@ test "addtf3" {
try test__addtf3(0x1.edcba52449872455634654321fp-1, 0x1.23456734245345543849abcdefp+5, 0x40042afc95c8b579, 0x61e58dd6c51eb77c);
}
-const __subtf3 = @import("addXf3.zig").__subtf3;
+const __subtf3 = @import("addf3.zig").__subtf3;
fn test__subtf3(a: f128, b: f128, expected_hi: u64, expected_lo: u64) !void {
const x = __subtf3(a, b);
@@ -87,7 +87,7 @@ test "subtf3" {
try test__subtf3(0x1.ee9d7c52354a6936ab8d7654321fp-1, 0x1.234567829a3bcdef5678ade36734p+5, 0xc0041b8af1915166, 0xa44a7bca780a166c);
}
-const __addxf3 = @import("addXf3.zig").__addxf3;
+const __addxf3 = @import("addf3.zig").__addxf3;
const qnan80 = @bitCast(f80, @bitCast(u80, math.nan(f80)) | (1 << (math.floatFractionalBits(f80) - 1)));
fn test__addxf3(a: f80, b: f80, expected: u80) !void {
lib/compiler_rt/addsf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fadd, .{ .name = "__aeabi_fadd", .linkage = common.linkage });
+ } else {
+ @export(__addsf3, .{ .name = "__addsf3", .linkage = common.linkage });
+ }
+}
+
+fn __addsf3(a: f32, b: f32) callconv(.C) f32 {
+ return addf3(f32, a, b);
+}
+
+fn __aeabi_fadd(a: f32, b: f32) callconv(.AAPCS) f32 {
+ return addf3(f32, a, b);
+}
lib/compiler_rt/addtf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__addkf3, .{ .name = "__addkf3", .linkage = common.linkage });
+ } else {
+ @export(__addtf3, .{ .name = "__addtf3", .linkage = common.linkage });
+ }
+}
+
+fn __addtf3(a: f128, b: f128) callconv(.C) f128 {
+ return addf3(f128, a, b);
+}
+
+fn __addkf3(a: f128, b: f128) callconv(.C) f128 {
+ return addf3(f128, a, b);
+}
lib/compiler_rt/addxf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const addf3 = @import("./addf3.zig").addf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__addxf3, .{ .name = "__addxf3", .linkage = common.linkage });
+}
+
+fn __addxf3(a: f80, b: f80) callconv(.C) f80 {
+ return addf3(f80, a, b);
+}
lib/compiler_rt/cmpdf2.zig
@@ -0,0 +1,68 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = common.linkage });
+ @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = common.linkage });
+ @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = common.linkage });
+ } else {
+ @export(__eqdf2, .{ .name = "__eqdf2", .linkage = common.linkage });
+ @export(__nedf2, .{ .name = "__nedf2", .linkage = common.linkage });
+ @export(__ledf2, .{ .name = "__ledf2", .linkage = common.linkage });
+ @export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = common.linkage });
+ @export(__ltdf2, .{ .name = "__ltdf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__ledf2`, `__eqdf2`, `__nedf2`, `__cmpdf2`,
+/// and `__ltdf2`.
+fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f64, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
+/// to have the same return value.
+fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqdf2 and __nedf2 are defined
+/// to have the same return value.
+fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __cmpdf2(a, b);
+}
+
+fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Equal);
+}
+
+fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) == .Less);
+}
+
+fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.LE, a, b) != .Greater);
+}
lib/compiler_rt/cmpsf2.zig
@@ -0,0 +1,68 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = common.linkage });
+ @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = common.linkage });
+ @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = common.linkage });
+ } else {
+ @export(__eqsf2, .{ .name = "__eqsf2", .linkage = common.linkage });
+ @export(__nesf2, .{ .name = "__nesf2", .linkage = common.linkage });
+ @export(__lesf2, .{ .name = "__lesf2", .linkage = common.linkage });
+ @export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = common.linkage });
+ @export(__ltsf2, .{ .name = "__ltsf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lesf2`, `__eqsf2`, `__nesf2`, `__cmpsf2`,
+/// and `__ltsf2`.
+fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f32, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
+/// to have the same return value.
+fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqsf2 and __nesf2 are defined
+/// to have the same return value.
+fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __cmpsf2(a, b);
+}
+
+fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Equal);
+}
+
+fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) == .Less);
+}
+
+fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f32, comparef.LE, a, b) != .Greater);
+}
lib/compiler_rt/cmptf2.zig
@@ -0,0 +1,73 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__eqkf2, .{ .name = "__eqkf2", .linkage = common.linkage });
+ @export(__nekf2, .{ .name = "__nekf2", .linkage = common.linkage });
+ @export(__ltkf2, .{ .name = "__ltkf2", .linkage = common.linkage });
+ @export(__lekf2, .{ .name = "__lekf2", .linkage = common.linkage });
+ } else {
+ @export(__eqtf2, .{ .name = "__eqtf2", .linkage = common.linkage });
+ @export(__netf2, .{ .name = "__netf2", .linkage = common.linkage });
+ @export(__letf2, .{ .name = "__letf2", .linkage = common.linkage });
+ @export(__cmptf2, .{ .name = "__cmptf2", .linkage = common.linkage });
+ @export(__lttf2, .{ .name = "__lttf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__letf2`, `__eqtf2`, `__netf2`, `__cmptf2`,
+/// and `__lttf2`.
+fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __letf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
+/// to have the same return value.
+fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqtf2 and __netf2 are defined
+/// to have the same return value.
+fn __netf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __eqkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __nekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __ltkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
+
+fn __lekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __cmptf2(a, b);
+}
lib/compiler_rt/cmpxf2.zig
@@ -0,0 +1,50 @@
+///! The quoted behavior definitions are from
+///! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__eqxf2, .{ .name = "__eqxf2", .linkage = common.linkage });
+ @export(__nexf2, .{ .name = "__nexf2", .linkage = common.linkage });
+ @export(__lexf2, .{ .name = "__lexf2", .linkage = common.linkage });
+ @export(__cmpxf2, .{ .name = "__cmpxf2", .linkage = common.linkage });
+ @export(__ltxf2, .{ .name = "__ltxf2", .linkage = common.linkage });
+}
+
+/// "These functions calculate a <=> b. That is, if a is less than b, they return -1;
+/// if a is greater than b, they return 1; and if a and b are equal they return 0.
+/// If either argument is NaN they return 1..."
+///
+/// Note that this matches the definition of `__lexf2`, `__eqxf2`, `__nexf2`, `__cmpxf2`,
+/// and `__ltxf2`.
+fn __cmpxf2(a: f80, b: f80) callconv(.C) i32 {
+ return @enumToInt(comparef.cmp_f80(comparef.LE, a, b));
+}
+
+/// "These functions return a value less than or equal to zero if neither argument is NaN,
+/// and a is less than or equal to b."
+fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return zero if neither argument is NaN, and a and b are equal."
+/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
+/// to have the same return value.
+fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return a nonzero value if either argument is NaN, or if a and b are unequal."
+/// Note that due to some kind of historical accident, __eqxf2 and __nexf2 are defined
+/// to have the same return value.
+fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
+
+/// "These functions return a value less than zero if neither argument is NaN, and a
+/// is strictly less than b."
+fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __cmpxf2(a, b);
+}
lib/compiler_rt/common.zig
@@ -3,8 +3,14 @@ const builtin = @import("builtin");
const math = std.math;
const is_test = builtin.is_test;
+pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+pub const want_aeabi = builtin.cpu.arch.isARM() or builtin.cpu.arch.isThumb();
+pub const want_ppc_abi = builtin.cpu.arch.isPPC() or builtin.cpu.arch.isPPC64();
+pub const want_msvc_abi = builtin.abi == .msvc;
+pub const want_gnu_abi = builtin.abi.isGnu();
+
// Avoid dragging in the runtime safety mechanisms into this .o file,
-// unless we're trying to test this file.
+// unless we're trying to test compiler-rt.
pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) noreturn {
_ = error_return_trace;
@setCold(true);
@@ -15,8 +21,13 @@ pub fn panic(msg: []const u8, error_return_trace: ?*std.builtin.StackTrace) nore
}
}
+/// AArch64 is the only ABI (at the moment) to support f16 arguments without the
+/// need for extending them to wider fp types.
+/// TODO remove this; do this type selection in the language rather than
+/// here in compiler-rt.
+pub const F16T = if (builtin.cpu.arch.isAARCH64()) f16 else u16;
+
pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
- @setRuntimeSafety(is_test);
switch (Z) {
u16 => {
// 16x16 --> 32 bit multiply
@@ -130,15 +141,11 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
-// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
pub fn normalize(comptime T: type, significand: *std.meta.Int(.unsigned, @typeInfo(T).Float.bits)) i32 {
- const bits = @typeInfo(T).Float.bits;
- const Z = std.meta.Int(.unsigned, bits);
- const S = std.meta.Int(.unsigned, bits - @clz(Z, @as(Z, bits) - 1));
- const fractionalBits = math.floatFractionalBits(T);
- const integerBit = @as(Z, 1) << fractionalBits;
+ const Z = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+ const integerBit = @as(Z, 1) << std.math.floatFractionalBits(T);
- const shift = @clz(std.meta.Int(.unsigned, bits), significand.*) - @clz(Z, integerBit);
- significand.* <<= @intCast(S, shift);
+ const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
+ significand.* <<= @intCast(std.math.Log2Int(Z), shift);
return @as(i32, 1) - shift;
}
lib/compiler_rt/comparef.zig
@@ -0,0 +1,118 @@
+const std = @import("std");
+
+pub const LE = enum(i32) {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+
+ const Unordered: LE = .Greater;
+};
+
+pub const GE = enum(i32) {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+
+ const Unordered: GE = .Less;
+};
+
+pub inline fn cmpf2(comptime T: type, comptime RT: type, a: T, b: T) RT {
+ const bits = @typeInfo(T).Float.bits;
+ const srep_t = std.meta.Int(.signed, bits);
+ const rep_t = std.meta.Int(.unsigned, bits);
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+ const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
+ const absMask = signBit - 1;
+ const infT = comptime std.math.inf(T);
+ const infRep = @bitCast(rep_t, infT);
+
+ const aInt = @bitCast(srep_t, a);
+ const bInt = @bitCast(srep_t, b);
+ const aAbs = @bitCast(rep_t, aInt) & absMask;
+ const bAbs = @bitCast(rep_t, bInt) & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0) return .Equal;
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt) {
+ return .Less;
+ } else if (aInt == bInt) {
+ return .Equal;
+ } else return .Greater;
+ } else {
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ if (aInt > bInt) {
+ return .Less;
+ } else if (aInt == bInt) {
+ return .Equal;
+ } else return .Greater;
+ }
+}
+
+pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
+ const a_rep = std.math.break_f80(a);
+ const b_rep = std.math.break_f80(b);
+ const sig_bits = std.math.floatMantissaBits(f80);
+ const int_bit = 0x8000000000000000;
+ const sign_bit = 0x8000;
+ const special_exp = 0x7FFF;
+
+ // If either a or b is NaN, they are unordered.
+ if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
+ (b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
+ return RT.Unordered;
+
+ // If a and b are both zeros, they are equal.
+ if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
+ return .Equal;
+
+ if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
+ return .Equal;
+ } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
+ // signs are different
+ if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ } else {
+ const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
+ const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
+ if (a_fraction < b_fraction) {
+ return .Less;
+ } else {
+ return .Greater;
+ }
+ }
+}
+
+pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
+ const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
+
+ const significandBits = std.math.floatMantissaBits(T);
+ const exponentBits = std.math.floatExponentBits(T);
+ const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
+ const absMask = signBit - 1;
+ const infRep = @bitCast(rep_t, std.math.inf(T));
+
+ const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
+ const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
+
+ return @boolToInt(aAbs > infRep or bAbs > infRep);
+}
+
+test {
+ _ = @import("comparesf2_test.zig");
+ _ = @import("comparedf2_test.zig");
+}
lib/compiler_rt/compareXf2.zig
@@ -1,440 +0,0 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/commit/d674d96bc56c0f377879d01c9d8dfdaaa7859cdb/compiler-rt/lib/builtins/comparesf2.c
-
-const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const arch = builtin.cpu.arch;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__lesf2, .{ .name = "__lesf2", .linkage = linkage });
- @export(__ledf2, .{ .name = "__ledf2", .linkage = linkage });
- @export(__letf2, .{ .name = "__letf2", .linkage = linkage });
- @export(__lexf2, .{ .name = "__lexf2", .linkage = linkage });
-
- @export(__gesf2, .{ .name = "__gesf2", .linkage = linkage });
- @export(__gedf2, .{ .name = "__gedf2", .linkage = linkage });
- @export(__getf2, .{ .name = "__getf2", .linkage = linkage });
- @export(__gexf2, .{ .name = "__gexf2", .linkage = linkage });
-
- @export(__eqsf2, .{ .name = "__eqsf2", .linkage = linkage });
- @export(__eqdf2, .{ .name = "__eqdf2", .linkage = linkage });
- @export(__eqxf2, .{ .name = "__eqxf2", .linkage = linkage });
-
- @export(__ltsf2, .{ .name = "__ltsf2", .linkage = linkage });
- @export(__ltdf2, .{ .name = "__ltdf2", .linkage = linkage });
- @export(__ltxf2, .{ .name = "__ltxf2", .linkage = linkage });
-
- @export(__nesf2, .{ .name = "__nesf2", .linkage = linkage });
- @export(__nedf2, .{ .name = "__nedf2", .linkage = linkage });
- @export(__nexf2, .{ .name = "__nexf2", .linkage = linkage });
-
- @export(__gtsf2, .{ .name = "__gtsf2", .linkage = linkage });
- @export(__gtdf2, .{ .name = "__gtdf2", .linkage = linkage });
- @export(__gtxf2, .{ .name = "__gtxf2", .linkage = linkage });
-
- @export(__unordsf2, .{ .name = "__unordsf2", .linkage = linkage });
- @export(__unorddf2, .{ .name = "__unorddf2", .linkage = linkage });
- @export(__unordtf2, .{ .name = "__unordtf2", .linkage = linkage });
-
- if (!is_test) {
- @export(__cmpsf2, .{ .name = "__cmpsf2", .linkage = linkage });
- @export(__cmpdf2, .{ .name = "__cmpdf2", .linkage = linkage });
- @export(__cmptf2, .{ .name = "__cmptf2", .linkage = linkage });
- @export(__eqtf2, .{ .name = "__eqtf2", .linkage = linkage });
- @export(__lttf2, .{ .name = "__lttf2", .linkage = linkage });
- @export(__gttf2, .{ .name = "__gttf2", .linkage = linkage });
- @export(__netf2, .{ .name = "__netf2", .linkage = linkage });
-
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_fcmpeq, .{ .name = "__aeabi_fcmpeq", .linkage = linkage });
- @export(__aeabi_fcmplt, .{ .name = "__aeabi_fcmplt", .linkage = linkage });
- @export(__aeabi_fcmple, .{ .name = "__aeabi_fcmple", .linkage = linkage });
- @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = linkage });
- @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = linkage });
- @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = linkage });
-
- @export(__aeabi_dcmpeq, .{ .name = "__aeabi_dcmpeq", .linkage = linkage });
- @export(__aeabi_dcmplt, .{ .name = "__aeabi_dcmplt", .linkage = linkage });
- @export(__aeabi_dcmple, .{ .name = "__aeabi_dcmple", .linkage = linkage });
- @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = linkage });
- @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = linkage });
- @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__eqkf2, .{ .name = "__eqkf2", .linkage = linkage });
- @export(__nekf2, .{ .name = "__nekf2", .linkage = linkage });
- @export(__gekf2, .{ .name = "__gekf2", .linkage = linkage });
- @export(__ltkf2, .{ .name = "__ltkf2", .linkage = linkage });
- @export(__lekf2, .{ .name = "__lekf2", .linkage = linkage });
- @export(__gtkf2, .{ .name = "__gtkf2", .linkage = linkage });
- @export(__unordkf2, .{ .name = "__unordkf2", .linkage = linkage });
- }
- }
-}
-
-const LE = enum(i32) {
- Less = -1,
- Equal = 0,
- Greater = 1,
-
- const Unordered: LE = .Greater;
-};
-
-const GE = enum(i32) {
- Less = -1,
- Equal = 0,
- Greater = 1,
-
- const Unordered: GE = .Less;
-};
-
-pub inline fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
- @setRuntimeSafety(builtin.is_test);
-
- const bits = @typeInfo(T).Float.bits;
- const srep_t = std.meta.Int(.signed, bits);
- const rep_t = std.meta.Int(.unsigned, bits);
-
- const significandBits = std.math.floatMantissaBits(T);
- const exponentBits = std.math.floatExponentBits(T);
- const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
- const absMask = signBit - 1;
- const infT = comptime std.math.inf(T);
- const infRep = @bitCast(rep_t, infT);
-
- const aInt = @bitCast(srep_t, a);
- const bInt = @bitCast(srep_t, b);
- const aAbs = @bitCast(rep_t, aInt) & absMask;
- const bAbs = @bitCast(rep_t, bInt) & absMask;
-
- // If either a or b is NaN, they are unordered.
- if (aAbs > infRep or bAbs > infRep) return RT.Unordered;
-
- // If a and b are both zeros, they are equal.
- if ((aAbs | bAbs) == 0) return .Equal;
-
- // If at least one of a and b is positive, we get the same result comparing
- // a and b as signed integers as we would with a floating-point compare.
- if ((aInt & bInt) >= 0) {
- if (aInt < bInt) {
- return .Less;
- } else if (aInt == bInt) {
- return .Equal;
- } else return .Greater;
- } else {
- // Otherwise, both are negative, so we need to flip the sense of the
- // comparison to get the correct result. (This assumes a twos- or ones-
- // complement integer representation; if integers are represented in a
- // sign-magnitude representation, then this flip is incorrect).
- if (aInt > bInt) {
- return .Less;
- } else if (aInt == bInt) {
- return .Equal;
- } else return .Greater;
- }
-}
-
-pub inline fn unordcmp(comptime T: type, a: T, b: T) i32 {
- @setRuntimeSafety(builtin.is_test);
-
- const rep_t = std.meta.Int(.unsigned, @typeInfo(T).Float.bits);
-
- const significandBits = std.math.floatMantissaBits(T);
- const exponentBits = std.math.floatExponentBits(T);
- const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
- const absMask = signBit - 1;
- const infRep = @bitCast(rep_t, std.math.inf(T));
-
- const aAbs: rep_t = @bitCast(rep_t, a) & absMask;
- const bAbs: rep_t = @bitCast(rep_t, b) & absMask;
-
- return @boolToInt(aAbs > infRep or bAbs > infRep);
-}
-
-// Comparison between f32
-
-pub fn __lesf2(a: f32, b: f32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f32, LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f32, GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __cmpsf2(a: f32, b: f32) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lesf2, .{ a, b });
-}
-
-pub fn __eqsf2(a: f32, b: f32) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lesf2, .{ a, b });
-}
-
-pub fn __ltsf2(a: f32, b: f32) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lesf2, .{ a, b });
-}
-
-pub fn __nesf2(a: f32, b: f32) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lesf2, .{ a, b });
-}
-
-pub fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __gesf2, .{ a, b });
-}
-
-// Comparison between f64
-
-pub fn __ledf2(a: f64, b: f64) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f64, LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f64, GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __cmpdf2(a: f64, b: f64) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __ledf2, .{ a, b });
-}
-
-pub fn __eqdf2(a: f64, b: f64) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __ledf2, .{ a, b });
-}
-
-pub fn __ltdf2(a: f64, b: f64) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __ledf2, .{ a, b });
-}
-
-pub fn __nedf2(a: f64, b: f64) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __ledf2, .{ a, b });
-}
-
-pub fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __gedf2, .{ a, b });
-}
-
-// Comparison between f80
-
-pub inline fn cmp_f80(comptime RT: type, a: f80, b: f80) RT {
- const a_rep = std.math.break_f80(a);
- const b_rep = std.math.break_f80(b);
- const sig_bits = std.math.floatMantissaBits(f80);
- const int_bit = 0x8000000000000000;
- const sign_bit = 0x8000;
- const special_exp = 0x7FFF;
-
- // If either a or b is NaN, they are unordered.
- if ((a_rep.exp & special_exp == special_exp and a_rep.fraction ^ int_bit != 0) or
- (b_rep.exp & special_exp == special_exp and b_rep.fraction ^ int_bit != 0))
- return RT.Unordered;
-
- // If a and b are both zeros, they are equal.
- if ((a_rep.fraction | b_rep.fraction) | ((a_rep.exp | b_rep.exp) & special_exp) == 0)
- return .Equal;
-
- if (@boolToInt(a_rep.exp == b_rep.exp) & @boolToInt(a_rep.fraction == b_rep.fraction) != 0) {
- return .Equal;
- } else if (a_rep.exp & sign_bit != b_rep.exp & sign_bit) {
- // signs are different
- if (@bitCast(i16, a_rep.exp) < @bitCast(i16, b_rep.exp)) {
- return .Less;
- } else {
- return .Greater;
- }
- } else {
- const a_fraction = a_rep.fraction | (@as(u80, a_rep.exp) << sig_bits);
- const b_fraction = b_rep.fraction | (@as(u80, b_rep.exp) << sig_bits);
- if (a_fraction < b_fraction) {
- return .Less;
- } else {
- return .Greater;
- }
- }
-}
-
-pub fn __lexf2(a: f80, b: f80) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp_f80(LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp_f80(GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __eqxf2(a: f80, b: f80) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lexf2, .{ a, b });
-}
-
-pub fn __ltxf2(a: f80, b: f80) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lexf2, .{ a, b });
-}
-
-pub fn __nexf2(a: f80, b: f80) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __lexf2, .{ a, b });
-}
-
-pub fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __gexf2, .{ a, b });
-}
-
-// Comparison between f128
-
-pub fn __letf2(a: f128, b: f128) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f128, LE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __getf2(a: f128, b: f128) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- const float = cmp(f128, GE, a, b);
- return @bitCast(i32, float);
-}
-
-pub fn __cmptf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __eqtf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __lttf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __netf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __getf2, .{ a, b });
-}
-
-pub fn __eqkf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __nekf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __gekf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __getf2, .{ a, b });
-}
-
-pub fn __ltkf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __lekf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __letf2, .{ a, b });
-}
-
-pub fn __gtkf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __getf2, .{ a, b });
-}
-
-// Unordered comparison between f32/f64/f128
-
-pub fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- return unordcmp(f32, a, b);
-}
-
-pub fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- return unordcmp(f64, a, b);
-}
-
-pub fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
- @setRuntimeSafety(builtin.is_test);
- return unordcmp(f128, a, b);
-}
-
-pub fn __unordkf2(a: f128, b: f128) callconv(.C) i32 {
- return @call(.{ .modifier = .always_inline }, __unordtf2, .{ a, b });
-}
-
-// ARM EABI intrinsics
-
-pub fn __aeabi_fcmpeq(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __eqsf2, .{ a, b }) == 0);
-}
-
-pub fn __aeabi_fcmplt(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __ltsf2, .{ a, b }) < 0);
-}
-
-pub fn __aeabi_fcmple(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __lesf2, .{ a, b }) <= 0);
-}
-
-pub fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gesf2, .{ a, b }) >= 0);
-}
-
-pub fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gtsf2, .{ a, b }) > 0);
-}
-
-pub fn __aeabi_fcmpun(a: f32, b: f32) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __unordsf2, .{ a, b });
-}
-
-pub fn __aeabi_dcmpeq(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __eqdf2, .{ a, b }) == 0);
-}
-
-pub fn __aeabi_dcmplt(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __ltdf2, .{ a, b }) < 0);
-}
-
-pub fn __aeabi_dcmple(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __ledf2, .{ a, b }) <= 0);
-}
-
-pub fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gedf2, .{ a, b }) >= 0);
-}
-
-pub fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @boolToInt(@call(.{ .modifier = .always_inline }, __gtdf2, .{ a, b }) > 0);
-}
-
-pub fn __aeabi_dcmpun(a: f64, b: f64) callconv(.AAPCS) i32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __unorddf2, .{ a, b });
-}
-
-test "comparesf2" {
- _ = @import("comparesf2_test.zig");
-}
-test "comparedf2" {
- _ = @import("comparedf2_test.zig");
-}
lib/compiler_rt/extend_f80.zig
@@ -1,140 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const arch = builtin.cpu.arch;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = linkage });
- @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = linkage });
- @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = linkage });
- @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = linkage });
-}
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-const F16T = if (arch.isAARCH64()) f16 else u16;
-
-fn __extendhfxf2(a: F16T) callconv(.C) f80 {
- return extendF80(f16, @bitCast(u16, a));
-}
-
-fn __extendsfxf2(a: f32) callconv(.C) f80 {
- return extendF80(f32, @bitCast(u32, a));
-}
-
-fn __extenddfxf2(a: f64) callconv(.C) f80 {
- return extendF80(f64, @bitCast(u64, a));
-}
-
-inline fn extendF80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
- @setRuntimeSafety(builtin.is_test);
-
- const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
- const src_sig_bits = std.math.floatMantissaBits(src_t);
- const dst_int_bit = 0x8000000000000000;
- const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
-
- const dst_exp_bias = 16383;
-
- const src_bits = @bitSizeOf(src_t);
- const src_exp_bits = src_bits - src_sig_bits - 1;
- const src_inf_exp = (1 << src_exp_bits) - 1;
- const src_exp_bias = src_inf_exp >> 1;
-
- const src_min_normal = 1 << src_sig_bits;
- const src_inf = src_inf_exp << src_sig_bits;
- const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
- const src_abs_mask = src_sign_mask - 1;
- const src_qnan = 1 << (src_sig_bits - 1);
- const src_nan_code = src_qnan - 1;
-
- var dst: std.math.F80 = undefined;
-
- // Break a into a sign and representation of the absolute value
- const a_abs = a & src_abs_mask;
- const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
-
- if (a_abs -% src_min_normal < src_inf - src_min_normal) {
- // a is a normal number.
- // Extend to the destination type by shifting the significand and
- // exponent into the proper position and rebiasing the exponent.
- dst.exp = @intCast(u16, a_abs >> src_sig_bits);
- dst.exp += dst_exp_bias - src_exp_bias;
- dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
- dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
- } else if (a_abs >= src_inf) {
- // a is NaN or infinity.
- // Conjure the result by beginning with infinity, then setting the qNaN
- // bit (if needed) and right-aligning the rest of the trailing NaN
- // payload field.
- dst.exp = 0x7fff;
- dst.fraction = dst_int_bit;
- dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
- dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
- } else if (a_abs != 0) {
- // a is denormal.
- // renormalize the significand and clear the leading bit, then insert
- // the correct adjusted exponent in the destination type.
- const scale: u16 = @clz(src_rep_t, a_abs) -
- @clz(src_rep_t, @as(src_rep_t, src_min_normal));
-
- dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
- dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
- dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
- dst.exp ^= 1;
- dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
- } else {
- // a is zero.
- dst.exp = 0;
- dst.fraction = 0;
- }
-
- dst.exp |= sign;
- return std.math.make_f80(dst);
-}
-
-fn __extendxftf2(a: f80) callconv(.C) f128 {
- @setRuntimeSafety(builtin.is_test);
-
- const src_int_bit: u64 = 0x8000000000000000;
- const src_sig_mask = ~src_int_bit;
- const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
- const dst_sig_bits = std.math.floatMantissaBits(f128);
-
- const dst_bits = @bitSizeOf(f128);
-
- const dst_min_normal = @as(u128, 1) << dst_sig_bits;
-
- // Break a into a sign and representation of the absolute value
- var a_rep = std.math.break_f80(a);
- const sign = a_rep.exp & 0x8000;
- a_rep.exp &= 0x7FFF;
- var abs_result: u128 = undefined;
-
- if (a_rep.exp == 0 and a_rep.fraction == 0) {
- // zero
- abs_result = 0;
- } else if (a_rep.exp == 0x7FFF) {
- // a is nan or infinite
- abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
- abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
- } else if (a_rep.fraction & src_int_bit != 0) {
- // a is a normal value
- abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
- abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
- } else {
- // a is denormal
- // renormalize the significand and clear the leading bit and integer part,
- // then insert the correct adjusted exponent in the destination type.
- const scale: u32 = @clz(u64, a_rep.fraction);
- abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
- abs_result ^= dst_min_normal;
- abs_result |= @as(u128, scale + 1) << dst_sig_bits;
- }
-
- // Apply the signbit to (dst_t)abs(a).
- const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
- return @bitCast(f128, result);
-}
lib/compiler_rt/extenddftf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__extenddfkf2, .{ .name = "__extenddfkf2", .linkage = common.linkage });
+ } else {
+ @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = common.linkage });
+ }
+}
+
+fn __extenddftf2(a: f64) callconv(.C) f128 {
+ return extendf(f128, f64, @bitCast(u64, a));
+}
+
+fn __extenddfkf2(a: f64) callconv(.C) f128 {
+ return extendf(f128, f64, @bitCast(u64, a));
+}
lib/compiler_rt/extenddfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extenddfxf2, .{ .name = "__extenddfxf2", .linkage = common.linkage });
+}
+
+fn __extenddfxf2(a: f64) callconv(.C) f80 {
+ return extend_f80(f64, @bitCast(u64, a));
+}
lib/compiler_rt/extendXfYf2.zig → lib/compiler_rt/extendf.zig
@@ -1,81 +1,10 @@
const std = @import("std");
-const builtin = @import("builtin");
-const is_test = builtin.is_test;
-const arch = builtin.cpu.arch;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__extenddftf2, .{ .name = "__extenddftf2", .linkage = linkage });
- @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = linkage });
- @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = linkage });
- @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = linkage });
- @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = linkage });
-
- if (!is_test) {
- @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = linkage });
-
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = linkage });
- @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__extendsfkf2, .{ .name = "__extendsfkf2", .linkage = linkage });
- @export(__extenddfkf2, .{ .name = "__extenddfkf2", .linkage = linkage });
- }
- }
-}
-
-pub fn __extendsfdf2(a: f32) callconv(.C) f64 {
- return extendXfYf2(f64, f32, @bitCast(u32, a));
-}
-
-pub fn __extenddftf2(a: f64) callconv(.C) f128 {
- return extendXfYf2(f128, f64, @bitCast(u64, a));
-}
-
-pub fn __extenddfkf2(a: f64) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __extenddftf2, .{a});
-}
-
-pub fn __extendsftf2(a: f32) callconv(.C) f128 {
- return extendXfYf2(f128, f32, @bitCast(u32, a));
-}
-
-pub fn __extendsfkf2(a: f32) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __extendsftf2, .{a});
-}
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-pub const F16T = if (arch.isAARCH64()) f16 else u16;
-
-pub fn __extendhfsf2(a: F16T) callconv(.C) f32 {
- return extendXfYf2(f32, f16, @bitCast(u16, a));
-}
-
-pub fn __gnu_h2f_ieee(a: F16T) callconv(.C) f32 {
- return @call(.{ .modifier = .always_inline }, __extendhfsf2, .{a});
-}
-
-pub fn __extendhftf2(a: F16T) callconv(.C) f128 {
- return extendXfYf2(f128, f16, @bitCast(u16, a));
-}
-
-pub fn __aeabi_h2f(arg: u16) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f32, f16, arg });
-}
-
-pub fn __aeabi_f2d(arg: f32) callconv(.AAPCS) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, extendXfYf2, .{ f64, f32, @bitCast(u32, arg) });
-}
-
-inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) dst_t {
- @setRuntimeSafety(builtin.is_test);
+pub inline fn extendf(
+ comptime dst_t: type,
+ comptime src_t: type,
+ a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits),
+) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
@@ -143,6 +72,71 @@ inline fn extendXfYf2(comptime dst_t: type, comptime src_t: type, a: std.meta.In
return @bitCast(dst_t, result);
}
+pub inline fn extend_f80(comptime src_t: type, a: std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits)) f80 {
+ const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(src_t);
+ const dst_int_bit = 0x8000000000000000;
+ const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ const dst_exp_bias = 16383;
+
+ const src_bits = @bitSizeOf(src_t);
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = (1 << src_exp_bits) - 1;
+ const src_exp_bias = src_inf_exp >> 1;
+
+ const src_min_normal = 1 << src_sig_bits;
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const src_qnan = 1 << (src_sig_bits - 1);
+ const src_nan_code = src_qnan - 1;
+
+ var dst: std.math.F80 = undefined;
+
+ // Break a into a sign and representation of the absolute value
+ const a_abs = a & src_abs_mask;
+ const sign: u16 = if (a & src_sign_mask != 0) 0x8000 else 0;
+
+ if (a_abs -% src_min_normal < src_inf - src_min_normal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ dst.exp = @intCast(u16, a_abs >> src_sig_bits);
+ dst.exp += dst_exp_bias - src_exp_bias;
+ dst.fraction = @as(u64, a_abs) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ } else if (a_abs >= src_inf) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ dst.exp = 0x7fff;
+ dst.fraction = dst_int_bit;
+ dst.fraction |= @as(u64, a_abs & src_qnan) << (dst_sig_bits - src_sig_bits);
+ dst.fraction |= @as(u64, a_abs & src_nan_code) << (dst_sig_bits - src_sig_bits);
+ } else if (a_abs != 0) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const scale: u16 = @clz(src_rep_t, a_abs) -
+ @clz(src_rep_t, @as(src_rep_t, src_min_normal));
+
+ dst.fraction = @as(u64, a_abs) << @intCast(u6, dst_sig_bits - src_sig_bits + scale);
+ dst.fraction |= dst_int_bit; // bit 64 is always set for normal numbers
+ dst.exp = @truncate(u16, a_abs >> @intCast(u4, src_sig_bits - scale));
+ dst.exp ^= 1;
+ dst.exp |= dst_exp_bias - src_exp_bias - scale + 1;
+ } else {
+ // a is zero.
+ dst.exp = 0;
+ dst.fraction = 0;
+ }
+
+ dst.exp |= sign;
+ return std.math.make_f80(dst);
+}
+
test {
_ = @import("extendXfYf2_test.zig");
}
lib/compiler_rt/extendhfsf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_gnu_abi) {
+ @export(__gnu_h2f_ieee, .{ .name = "__gnu_h2f_ieee", .linkage = common.linkage });
+ } else if (common.want_aeabi) {
+ @export(__aeabi_h2f, .{ .name = "__aeabi_h2f", .linkage = common.linkage });
+ } else {
+ @export(__extendhfsf2, .{ .name = "__extendhfsf2", .linkage = common.linkage });
+ }
+}
+
+fn __extendhfsf2(a: common.F16T) callconv(.C) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
+
+fn __gnu_h2f_ieee(a: common.F16T) callconv(.C) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
+
+fn __aeabi_h2f(a: u16) callconv(.AAPCS) f32 {
+ return extendf(f32, f16, @bitCast(u16, a));
+}
lib/compiler_rt/extendhftf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhftf2, .{ .name = "__extendhftf2", .linkage = common.linkage });
+}
+
+fn __extendhftf2(a: common.F16T) callconv(.C) f128 {
+ return extendf(f128, f16, @bitCast(u16, a));
+}
lib/compiler_rt/extendhfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendhfxf2, .{ .name = "__extendhfxf2", .linkage = common.linkage });
+}
+
+fn __extendhfxf2(a: common.F16T) callconv(.C) f80 {
+ return extend_f80(f16, @bitCast(u16, a));
+}
lib/compiler_rt/extendsfdf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2d, .{ .name = "__aeabi_f2d", .linkage = common.linkage });
+ } else {
+ @export(__extendsfdf2, .{ .name = "__extendsfdf2", .linkage = common.linkage });
+ }
+}
+
+fn __extendsfdf2(a: f32) callconv(.C) f64 {
+ return extendf(f64, f32, @bitCast(u32, a));
+}
+
+fn __aeabi_f2d(a: f32) callconv(.AAPCS) f64 {
+ return extendf(f64, f32, @bitCast(u32, a));
+}
lib/compiler_rt/extendsftf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const extendf = @import("./extendf.zig").extendf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__extendsfkf2, .{ .name = "__extendsfkf2", .linkage = common.linkage });
+ } else {
+ @export(__extendsftf2, .{ .name = "__extendsftf2", .linkage = common.linkage });
+ }
+}
+
+fn __extendsftf2(a: f32) callconv(.C) f128 {
+ return extendf(f128, f32, @bitCast(u32, a));
+}
+
+fn __extendsfkf2(a: f32) callconv(.C) f128 {
+ return extendf(f128, f32, @bitCast(u32, a));
+}
lib/compiler_rt/extendsfxf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const extend_f80 = @import("./extendf.zig").extend_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendsfxf2, .{ .name = "__extendsfxf2", .linkage = common.linkage });
+}
+
+fn __extendsfxf2(a: f32) callconv(.C) f80 {
+ return extend_f80(f32, @bitCast(u32, a));
+}
lib/compiler_rt/extendxftf2.zig
@@ -0,0 +1,50 @@
+const std = @import("std");
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__extendxftf2, .{ .name = "__extendxftf2", .linkage = common.linkage });
+}
+
+fn __extendxftf2(a: f80) callconv(.C) f128 {
+ const src_int_bit: u64 = 0x8000000000000000;
+ const src_sig_mask = ~src_int_bit;
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(f128);
+
+ const dst_bits = @bitSizeOf(f128);
+
+ const dst_min_normal = @as(u128, 1) << dst_sig_bits;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = std.math.break_f80(a);
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ var abs_result: u128 = undefined;
+
+ if (a_rep.exp == 0 and a_rep.fraction == 0) {
+ // zero
+ abs_result = 0;
+ } else if (a_rep.exp == 0x7FFF) {
+ // a is nan or infinite
+ abs_result = @as(u128, a_rep.fraction) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else if (a_rep.fraction & src_int_bit != 0) {
+ // a is a normal value
+ abs_result = @as(u128, a_rep.fraction & src_sig_mask) << (dst_sig_bits - src_sig_bits);
+ abs_result |= @as(u128, a_rep.exp) << dst_sig_bits;
+ } else {
+ // a is denormal
+ // renormalize the significand and clear the leading bit and integer part,
+ // then insert the correct adjusted exponent in the destination type.
+ const scale: u32 = @clz(u64, a_rep.fraction);
+ abs_result = @as(u128, a_rep.fraction) << @intCast(u7, dst_sig_bits - src_sig_bits + scale + 1);
+ abs_result ^= dst_min_normal;
+ abs_result |= @as(u128, scale + 1) << dst_sig_bits;
+ }
+
+ // Apply the signbit to (dst_t)abs(a).
+ const result: u128 align(@alignOf(f128)) = abs_result | @as(u128, sign) << (dst_bits - 16);
+ return @bitCast(f128, result);
+}
lib/compiler_rt/fixdfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = common.linkage });
+ } else {
+ @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = common.linkage });
+ }
+}
+
+fn __fixdfdi(a: f64) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
+ return floatToInt(i64, a);
+}
lib/compiler_rt/fixdfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = common.linkage });
+ } else {
+ @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = common.linkage });
+ }
+}
+
+fn __fixdfsi(a: f64) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
+ return floatToInt(i32, a);
+}
lib/compiler_rt/fixdfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixdfti, .{ .name = "__fixdfti", .linkage = common.linkage });
+}
+
+fn __fixdfti(a: f64) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
lib/compiler_rt/fixhfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfdi, .{ .name = "__fixhfdi", .linkage = common.linkage });
+}
+
+fn __fixhfdi(a: f16) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
lib/compiler_rt/fixhfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfsi, .{ .name = "__fixhfsi", .linkage = common.linkage });
+}
+
+fn __fixhfsi(a: f16) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
lib/compiler_rt/fixhfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixhfti, .{ .name = "__fixhfti", .linkage = common.linkage });
+}
+
+fn __fixhfti(a: f16) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
lib/compiler_rt/fixsfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = common.linkage });
+ } else {
+ @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = common.linkage });
+ }
+}
+
+fn __fixsfdi(a: f32) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
+ return floatToInt(i64, a);
+}
lib/compiler_rt/fixsfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = common.linkage });
+ } else {
+ @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = common.linkage });
+ }
+}
+
+fn __fixsfsi(a: f32) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
+ return floatToInt(i32, a);
+}
lib/compiler_rt/fixsfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixsfti, .{ .name = "__fixsfti", .linkage = common.linkage });
+}
+
+fn __fixsfti(a: f32) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
lib/compiler_rt/fixtfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixkfdi, .{ .name = "__fixkfdi", .linkage = common.linkage });
+ } else {
+ @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = common.linkage });
+ }
+}
+
+fn __fixtfdi(a: f128) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
+
+fn __fixkfdi(a: f128) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
lib/compiler_rt/fixtfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixkfsi, .{ .name = "__fixkfsi", .linkage = common.linkage });
+ } else {
+ @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = common.linkage });
+ }
+}
+
+fn __fixtfsi(a: f128) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
+
+fn __fixkfsi(a: f128) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
lib/compiler_rt/fixtfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixtfti, .{ .name = "__fixtfti", .linkage = common.linkage });
+}
+
+fn __fixtfti(a: f128) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
lib/compiler_rt/fixunsdfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = common.linkage });
+ } else {
+ @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = common.linkage });
+ }
+}
+
+fn __fixunsdfdi(a: f64) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
+ return floatToInt(u64, a);
+}
lib/compiler_rt/fixunsdfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = common.linkage });
+ } else {
+ @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = common.linkage });
+ }
+}
+
+fn __fixunsdfsi(a: f64) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
+ return floatToInt(u32, a);
+}
lib/compiler_rt/fixunsdfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = common.linkage });
+}
+
+fn __fixunsdfti(a: f64) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
lib/compiler_rt/fixunshfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfdi, .{ .name = "__fixunshfdi", .linkage = common.linkage });
+}
+
+fn __fixunshfdi(a: f16) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
lib/compiler_rt/fixunshfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfsi, .{ .name = "__fixunshfsi", .linkage = common.linkage });
+}
+
+fn __fixunshfsi(a: f16) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
lib/compiler_rt/fixunshfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunshfti, .{ .name = "__fixunshfti", .linkage = common.linkage });
+}
+
+fn __fixunshfti(a: f16) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
lib/compiler_rt/fixunssfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = common.linkage });
+ } else {
+ @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = common.linkage });
+ }
+}
+
+fn __fixunssfdi(a: f32) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
+ return floatToInt(u64, a);
+}
lib/compiler_rt/fixunssfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = common.linkage });
+ } else {
+ @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = common.linkage });
+ }
+}
+
+fn __fixunssfsi(a: f32) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
+ return floatToInt(u32, a);
+}
lib/compiler_rt/fixunssfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = common.linkage });
+}
+
+fn __fixunssfti(a: f32) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
lib/compiler_rt/fixunstfdi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixunskfdi, .{ .name = "__fixunskfdi", .linkage = common.linkage });
+ } else {
+ @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = common.linkage });
+ }
+}
+
+fn __fixunstfdi(a: f128) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
+
+fn __fixunskfdi(a: f128) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
lib/compiler_rt/fixunstfsi.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__fixunskfsi, .{ .name = "__fixunskfsi", .linkage = common.linkage });
+ } else {
+ @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = common.linkage });
+ }
+}
+
+fn __fixunstfsi(a: f128) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
+
+fn __fixunskfsi(a: f128) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
lib/compiler_rt/fixunstfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = common.linkage });
+}
+
+fn __fixunstfti(a: f128) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
lib/compiler_rt/fixunsxfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = common.linkage });
+}
+
+fn __fixunsxfdi(a: f80) callconv(.C) u64 {
+ return floatToInt(u64, a);
+}
lib/compiler_rt/fixunsxfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = common.linkage });
+}
+
+fn __fixunsxfsi(a: f80) callconv(.C) u32 {
+ return floatToInt(u32, a);
+}
lib/compiler_rt/fixunsxfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = common.linkage });
+}
+
+fn __fixunsxfti(a: f80) callconv(.C) u128 {
+ return floatToInt(u128, a);
+}
lib/compiler_rt/fixxfdi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = common.linkage });
+}
+
+fn __fixxfdi(a: f80) callconv(.C) i64 {
+ return floatToInt(i64, a);
+}
lib/compiler_rt/fixxfsi.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = common.linkage });
+}
+
+fn __fixxfsi(a: f80) callconv(.C) i32 {
+ return floatToInt(i32, a);
+}
lib/compiler_rt/fixxfti.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const floatToInt = @import("./float_to_int.zig").floatToInt;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__fixxfti, .{ .name = "__fixxfti", .linkage = common.linkage });
+}
+
+fn __fixxfti(a: f80) callconv(.C) i128 {
+ return floatToInt(i128, a);
+}
lib/compiler_rt/fixXfYi.zig
@@ -1,312 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const math = std.math;
-const Log2Int = math.Log2Int;
-const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- // Float -> Integral Conversion
-
- // Conversion from f32
- @export(__fixsfsi, .{ .name = "__fixsfsi", .linkage = linkage });
- @export(__fixunssfsi, .{ .name = "__fixunssfsi", .linkage = linkage });
-
- @export(__fixsfdi, .{ .name = "__fixsfdi", .linkage = linkage });
- @export(__fixunssfdi, .{ .name = "__fixunssfdi", .linkage = linkage });
-
- @export(__fixsfti, .{ .name = "__fixsfti", .linkage = linkage });
- @export(__fixunssfti, .{ .name = "__fixunssfti", .linkage = linkage });
-
- // Conversion from f64
- @export(__fixdfsi, .{ .name = "__fixdfsi", .linkage = linkage });
- @export(__fixunsdfsi, .{ .name = "__fixunsdfsi", .linkage = linkage });
-
- @export(__fixdfdi, .{ .name = "__fixdfdi", .linkage = linkage });
- @export(__fixunsdfdi, .{ .name = "__fixunsdfdi", .linkage = linkage });
-
- @export(__fixdfti, .{ .name = "__fixdfti", .linkage = linkage });
- @export(__fixunsdfti, .{ .name = "__fixunsdfti", .linkage = linkage });
-
- // Conversion from f80
- @export(__fixxfsi, .{ .name = "__fixxfsi", .linkage = linkage });
- @export(__fixunsxfsi, .{ .name = "__fixunsxfsi", .linkage = linkage });
-
- @export(__fixxfdi, .{ .name = "__fixxfdi", .linkage = linkage });
- @export(__fixunsxfdi, .{ .name = "__fixunsxfdi", .linkage = linkage });
-
- @export(__fixxfti, .{ .name = "__fixxfti", .linkage = linkage });
- @export(__fixunsxfti, .{ .name = "__fixunsxfti", .linkage = linkage });
-
- // Conversion from f128
- @export(__fixtfsi, .{ .name = "__fixtfsi", .linkage = linkage });
- @export(__fixunstfsi, .{ .name = "__fixunstfsi", .linkage = linkage });
-
- @export(__fixtfdi, .{ .name = "__fixtfdi", .linkage = linkage });
- @export(__fixunstfdi, .{ .name = "__fixunstfdi", .linkage = linkage });
-
- @export(__fixtfti, .{ .name = "__fixtfti", .linkage = linkage });
- @export(__fixunstfti, .{ .name = "__fixunstfti", .linkage = linkage });
-
- if (!is_test) {
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_f2ulz, .{ .name = "__aeabi_f2ulz", .linkage = linkage });
- @export(__aeabi_d2ulz, .{ .name = "__aeabi_d2ulz", .linkage = linkage });
-
- @export(__aeabi_f2lz, .{ .name = "__aeabi_f2lz", .linkage = linkage });
- @export(__aeabi_d2lz, .{ .name = "__aeabi_d2lz", .linkage = linkage });
-
- @export(__aeabi_d2uiz, .{ .name = "__aeabi_d2uiz", .linkage = linkage });
-
- @export(__aeabi_f2uiz, .{ .name = "__aeabi_f2uiz", .linkage = linkage });
-
- @export(__aeabi_f2iz, .{ .name = "__aeabi_f2iz", .linkage = linkage });
- @export(__aeabi_d2iz, .{ .name = "__aeabi_d2iz", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__fixkfdi, .{ .name = "__fixkfdi", .linkage = linkage });
- @export(__fixkfsi, .{ .name = "__fixkfsi", .linkage = linkage });
- @export(__fixunskfsi, .{ .name = "__fixunskfsi", .linkage = linkage });
- @export(__fixunskfdi, .{ .name = "__fixunskfdi", .linkage = linkage });
- }
- }
-}
-
-pub inline fn fixXfYi(comptime I: type, a: anytype) I {
- @setRuntimeSafety(is_test);
-
- const F = @TypeOf(a);
- const float_bits = @typeInfo(F).Float.bits;
- const int_bits = @typeInfo(I).Int.bits;
- const rep_t = std.meta.Int(.unsigned, float_bits);
- const sig_bits = math.floatMantissaBits(F);
- const exp_bits = math.floatExponentBits(F);
- const fractional_bits = math.floatFractionalBits(F);
-
- const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
- const max_exp = (1 << (exp_bits - 1));
- const exp_bias = max_exp - 1;
- const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
-
- // Break a into sign, exponent, significand
- const a_rep: rep_t = @bitCast(rep_t, a);
- const negative = (a_rep >> (float_bits - 1)) != 0;
- const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
- const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
-
- // If the exponent is negative, the result rounds to zero.
- if (exponent < 0) return 0;
-
- // If the value is too large for the integer type, saturate.
- switch (@typeInfo(I).Int.signedness) {
- .unsigned => {
- if (negative) return 0;
- if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
- },
- .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
- return if (negative) math.minInt(I) else math.maxInt(I);
- },
- }
-
- // If 0 <= exponent < sig_bits, right shift to get the result.
- // Otherwise, shift left.
- var result: I = undefined;
- if (exponent < fractional_bits) {
- result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
- } else {
- result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
- }
-
- if ((@typeInfo(I).Int.signedness == .signed) and negative)
- return ~result +% 1;
- return result;
-}
-
-// Conversion from f16
-
-pub fn __fixhfsi(a: f16) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunshfsi(a: f16) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixhfdi(a: f16) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunshfdi(a: f16) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixhfti(a: f16) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunshfti(a: f16) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f32
-
-pub fn __fixsfsi(a: f32) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunssfsi(a: f32) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixsfdi(a: f32) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunssfdi(a: f32) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixsfti(a: f32) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunssfti(a: f32) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f64
-
-pub fn __fixdfsi(a: f64) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunsdfsi(a: f64) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixdfdi(a: f64) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunsdfdi(a: f64) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixdfti(a: f64) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunsdfti(a: f64) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f80
-
-pub fn __fixxfsi(a: f80) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixunsxfsi(a: f80) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixxfdi(a: f80) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixunsxfdi(a: f80) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixxfti(a: f80) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunsxfti(a: f80) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f128
-
-pub fn __fixtfsi(a: f128) callconv(.C) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __fixkfsi(a: f128) callconv(.C) i32 {
- return __fixtfsi(a);
-}
-
-pub fn __fixunstfsi(a: f128) callconv(.C) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __fixunskfsi(a: f128) callconv(.C) u32 {
- return @call(.{ .modifier = .always_inline }, __fixunstfsi, .{a});
-}
-
-pub fn __fixtfdi(a: f128) callconv(.C) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __fixkfdi(a: f128) callconv(.C) i64 {
- return @call(.{ .modifier = .always_inline }, __fixtfdi, .{a});
-}
-
-pub fn __fixunstfdi(a: f128) callconv(.C) u64 {
- return fixXfYi(u64, a);
-}
-
-pub fn __fixunskfdi(a: f128) callconv(.C) u64 {
- return @call(.{ .modifier = .always_inline }, __fixunstfdi, .{a});
-}
-
-pub fn __fixtfti(a: f128) callconv(.C) i128 {
- return fixXfYi(i128, a);
-}
-
-pub fn __fixunstfti(a: f128) callconv(.C) u128 {
- return fixXfYi(u128, a);
-}
-
-// Conversion from f32
-
-pub fn __aeabi_f2iz(a: f32) callconv(.AAPCS) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __aeabi_f2uiz(a: f32) callconv(.AAPCS) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __aeabi_f2lz(a: f32) callconv(.AAPCS) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __aeabi_f2ulz(a: f32) callconv(.AAPCS) u64 {
- return fixXfYi(u64, a);
-}
-
-// Conversion from f64
-
-pub fn __aeabi_d2iz(a: f64) callconv(.AAPCS) i32 {
- return fixXfYi(i32, a);
-}
-
-pub fn __aeabi_d2uiz(a: f64) callconv(.AAPCS) u32 {
- return fixXfYi(u32, a);
-}
-
-pub fn __aeabi_d2lz(a: f64) callconv(.AAPCS) i64 {
- return fixXfYi(i64, a);
-}
-
-pub fn __aeabi_d2ulz(a: f64) callconv(.AAPCS) u64 {
- return fixXfYi(u64, a);
-}
-
-test {
- _ = @import("fixXfYi_test.zig");
-}
lib/compiler_rt/float_to_int.zig
@@ -0,0 +1,55 @@
+const Int = @import("std").meta.Int;
+const math = @import("std").math;
+const Log2Int = math.Log2Int;
+
+pub inline fn floatToInt(comptime I: type, a: anytype) I {
+ const F = @TypeOf(a);
+ const float_bits = @typeInfo(F).Float.bits;
+ const int_bits = @typeInfo(I).Int.bits;
+ const rep_t = Int(.unsigned, float_bits);
+ const sig_bits = math.floatMantissaBits(F);
+ const exp_bits = math.floatExponentBits(F);
+ const fractional_bits = math.floatFractionalBits(F);
+
+ const implicit_bit = if (F != f80) (@as(rep_t, 1) << sig_bits) else 0;
+ const max_exp = (1 << (exp_bits - 1));
+ const exp_bias = max_exp - 1;
+ const sig_mask = (@as(rep_t, 1) << sig_bits) - 1;
+
+ // Break a into sign, exponent, significand
+ const a_rep: rep_t = @bitCast(rep_t, a);
+ const negative = (a_rep >> (float_bits - 1)) != 0;
+ const exponent = @intCast(i32, (a_rep << 1) >> (sig_bits + 1)) - exp_bias;
+ const significand: rep_t = (a_rep & sig_mask) | implicit_bit;
+
+ // If the exponent is negative, the result rounds to zero.
+ if (exponent < 0) return 0;
+
+ // If the value is too large for the integer type, saturate.
+ switch (@typeInfo(I).Int.signedness) {
+ .unsigned => {
+ if (negative) return 0;
+ if (@intCast(c_uint, exponent) >= @minimum(int_bits, max_exp)) return math.maxInt(I);
+ },
+ .signed => if (@intCast(c_uint, exponent) >= @minimum(int_bits - 1, max_exp)) {
+ return if (negative) math.minInt(I) else math.maxInt(I);
+ },
+ }
+
+ // If 0 <= exponent < sig_bits, right shift to get the result.
+ // Otherwise, shift left.
+ var result: I = undefined;
+ if (exponent < fractional_bits) {
+ result = @intCast(I, significand >> @intCast(Log2Int(rep_t), fractional_bits - exponent));
+ } else {
+ result = @intCast(I, significand) << @intCast(Log2Int(I), exponent - fractional_bits);
+ }
+
+ if ((@typeInfo(I).Int.signedness == .signed) and negative)
+ return ~result +% 1;
+ return result;
+}
+
+test {
+ _ = @import("float_to_int_test.zig");
+}
lib/compiler_rt/fixXfYi_test.zig → lib/compiler_rt/float_to_int_test.zig
File renamed without changes
lib/compiler_rt/floatdidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = common.linkage });
+ } else {
+ @export(__floatdidf, .{ .name = "__floatdidf", .linkage = common.linkage });
+ }
+}
+
+fn __floatdidf(a: i64) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_l2d(a: i64) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
lib/compiler_rt/floatdihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatdihf, .{ .name = "__floatdihf", .linkage = common.linkage });
+}
+
+fn __floatdihf(a: i64) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
lib/compiler_rt/floatdisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = common.linkage });
+ } else {
+ @export(__floatdisf, .{ .name = "__floatdisf", .linkage = common.linkage });
+ }
+}
+
+fn __floatdisf(a: i64) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_l2f(a: i64) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
lib/compiler_rt/floatditf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatdikf, .{ .name = "__floatdikf", .linkage = common.linkage });
+ } else {
+ @export(__floatditf, .{ .name = "__floatditf", .linkage = common.linkage });
+ }
+}
+
+fn __floatditf(a: i64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatdikf(a: i64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
lib/compiler_rt/floatdixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatdixf, .{ .name = "__floatdixf", .linkage = common.linkage });
+}
+
+fn __floatdixf(a: i64) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
lib/compiler_rt/floatsidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = common.linkage });
+ } else {
+ @export(__floatsidf, .{ .name = "__floatsidf", .linkage = common.linkage });
+ }
+}
+
+fn __floatsidf(a: i32) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_i2d(a: i32) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
lib/compiler_rt/floatsihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatsihf, .{ .name = "__floatsihf", .linkage = common.linkage });
+}
+
+fn __floatsihf(a: i32) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
lib/compiler_rt/floatsisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = common.linkage });
+ } else {
+ @export(__floatsisf, .{ .name = "__floatsisf", .linkage = common.linkage });
+ }
+}
+
+fn __floatsisf(a: i32) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_i2f(a: i32) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
lib/compiler_rt/floatsitf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatsikf, .{ .name = "__floatsikf", .linkage = common.linkage });
+ } else {
+ @export(__floatsitf, .{ .name = "__floatsitf", .linkage = common.linkage });
+ }
+}
+
+fn __floatsitf(a: i32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatsikf(a: i32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
lib/compiler_rt/floatsixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatsixf, .{ .name = "__floatsixf", .linkage = common.linkage });
+}
+
+fn __floatsixf(a: i32) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
lib/compiler_rt/floattidf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattidf, .{ .name = "__floattidf", .linkage = common.linkage });
+}
+
+fn __floattidf(a: i128) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
lib/compiler_rt/floattihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattihf, .{ .name = "__floattihf", .linkage = common.linkage });
+}
+
+fn __floattihf(a: i128) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
lib/compiler_rt/floattisf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattisf, .{ .name = "__floattisf", .linkage = common.linkage });
+}
+
+fn __floattisf(a: i128) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
lib/compiler_rt/floattitf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattitf, .{ .name = "__floattitf", .linkage = common.linkage });
+}
+
+fn __floattitf(a: i128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
lib/compiler_rt/floattixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floattixf, .{ .name = "__floattixf", .linkage = common.linkage });
+}
+
+fn __floattixf(a: i128) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
lib/compiler_rt/floatundidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = common.linkage });
+ } else {
+ @export(__floatundidf, .{ .name = "__floatundidf", .linkage = common.linkage });
+ }
+}
+
+fn __floatundidf(a: u64) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_ul2d(a: u64) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
lib/compiler_rt/floatundihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatundihf, .{ .name = "__floatundihf", .linkage = common.linkage });
+}
+
+fn __floatundihf(a: u64) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
lib/compiler_rt/floatundisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = common.linkage });
+ } else {
+ @export(__floatundisf, .{ .name = "__floatundisf", .linkage = common.linkage });
+ }
+}
+
+fn __floatundisf(a: u64) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_ul2f(a: u64) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
lib/compiler_rt/floatunditf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatundikf, .{ .name = "__floatundikf", .linkage = common.linkage });
+ } else {
+ @export(__floatunditf, .{ .name = "__floatunditf", .linkage = common.linkage });
+ }
+}
+
+fn __floatunditf(a: u64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatundikf(a: u64) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
lib/compiler_rt/floatundixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatundixf, .{ .name = "__floatundixf", .linkage = common.linkage });
+}
+
+fn __floatundixf(a: u64) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
lib/compiler_rt/floatunsidf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = common.linkage });
+ } else {
+ @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = common.linkage });
+ }
+}
+
+fn __floatunsidf(a: u32) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
+
+fn __aeabi_ui2d(a: u32) callconv(.AAPCS) f64 {
+ return intToFloat(f64, a);
+}
lib/compiler_rt/floatunsihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatunsihf, .{ .name = "__floatunsihf", .linkage = common.linkage });
+}
+
+fn __floatunsihf(a: u32) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
lib/compiler_rt/floatunsisf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = common.linkage });
+ } else {
+ @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = common.linkage });
+ }
+}
+
+fn __floatunsisf(a: u32) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
+
+fn __aeabi_ui2f(a: u32) callconv(.AAPCS) f32 {
+ return intToFloat(f32, a);
+}
lib/compiler_rt/floatunsitf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatunsikf, .{ .name = "__floatunsikf", .linkage = common.linkage });
+ } else {
+ @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = common.linkage });
+ }
+}
+
+fn __floatunsitf(a: u32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatunsikf(a: u32) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
lib/compiler_rt/floatunsixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = common.linkage });
+}
+
+fn __floatunsixf(a: u32) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
lib/compiler_rt/floatuntidf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = common.linkage });
+}
+
+fn __floatuntidf(a: u128) callconv(.C) f64 {
+ return intToFloat(f64, a);
+}
lib/compiler_rt/floatuntihf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntihf, .{ .name = "__floatuntihf", .linkage = common.linkage });
+}
+
+fn __floatuntihf(a: u128) callconv(.C) f16 {
+ return intToFloat(f16, a);
+}
lib/compiler_rt/floatuntisf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = common.linkage });
+}
+
+fn __floatuntisf(a: u128) callconv(.C) f32 {
+ return intToFloat(f32, a);
+}
lib/compiler_rt/floatuntitf.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__floatuntikf, .{ .name = "__floatuntikf", .linkage = common.linkage });
+ } else {
+ @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = common.linkage });
+ }
+}
+
+fn __floatuntitf(a: u128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
+
+fn __floatuntikf(a: u128) callconv(.C) f128 {
+ return intToFloat(f128, a);
+}
lib/compiler_rt/floatuntixf.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const intToFloat = @import("./int_to_float.zig").intToFloat;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = common.linkage });
+}
+
+pub fn __floatuntixf(a: u128) callconv(.C) f80 {
+ return intToFloat(f80, a);
+}
lib/compiler_rt/floatXiYf.zig
@@ -1,311 +0,0 @@
-const builtin = @import("builtin");
-const std = @import("std");
-const math = std.math;
-const expect = std.testing.expect;
-const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- // Integral -> Float Conversion
-
- // Conversion to f32
- @export(__floatsisf, .{ .name = "__floatsisf", .linkage = linkage });
- @export(__floatunsisf, .{ .name = "__floatunsisf", .linkage = linkage });
-
- @export(__floatundisf, .{ .name = "__floatundisf", .linkage = linkage });
- @export(__floatdisf, .{ .name = "__floatdisf", .linkage = linkage });
-
- @export(__floattisf, .{ .name = "__floattisf", .linkage = linkage });
- @export(__floatuntisf, .{ .name = "__floatuntisf", .linkage = linkage });
-
- // Conversion to f64
- @export(__floatsidf, .{ .name = "__floatsidf", .linkage = linkage });
- @export(__floatunsidf, .{ .name = "__floatunsidf", .linkage = linkage });
-
- @export(__floatdidf, .{ .name = "__floatdidf", .linkage = linkage });
- @export(__floatundidf, .{ .name = "__floatundidf", .linkage = linkage });
-
- @export(__floattidf, .{ .name = "__floattidf", .linkage = linkage });
- @export(__floatuntidf, .{ .name = "__floatuntidf", .linkage = linkage });
-
- // Conversion to f80
- @export(__floatsixf, .{ .name = "__floatsixf", .linkage = linkage });
- @export(__floatunsixf, .{ .name = "__floatunsixf", .linkage = linkage });
-
- @export(__floatdixf, .{ .name = "__floatdixf", .linkage = linkage });
- @export(__floatundixf, .{ .name = "__floatundixf", .linkage = linkage });
-
- @export(__floattixf, .{ .name = "__floattixf", .linkage = linkage });
- @export(__floatuntixf, .{ .name = "__floatuntixf", .linkage = linkage });
-
- // Conversion to f128
- @export(__floatsitf, .{ .name = "__floatsitf", .linkage = linkage });
- @export(__floatunsitf, .{ .name = "__floatunsitf", .linkage = linkage });
-
- @export(__floatditf, .{ .name = "__floatditf", .linkage = linkage });
- @export(__floatunditf, .{ .name = "__floatunditf", .linkage = linkage });
-
- @export(__floattitf, .{ .name = "__floattitf", .linkage = linkage });
- @export(__floatuntitf, .{ .name = "__floatuntitf", .linkage = linkage });
-
- if (!is_test) {
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_i2d, .{ .name = "__aeabi_i2d", .linkage = linkage });
- @export(__aeabi_l2d, .{ .name = "__aeabi_l2d", .linkage = linkage });
- @export(__aeabi_l2f, .{ .name = "__aeabi_l2f", .linkage = linkage });
- @export(__aeabi_ui2d, .{ .name = "__aeabi_ui2d", .linkage = linkage });
- @export(__aeabi_ul2d, .{ .name = "__aeabi_ul2d", .linkage = linkage });
- @export(__aeabi_ui2f, .{ .name = "__aeabi_ui2f", .linkage = linkage });
- @export(__aeabi_ul2f, .{ .name = "__aeabi_ul2f", .linkage = linkage });
-
- @export(__aeabi_i2f, .{ .name = "__aeabi_i2f", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__floatsikf, .{ .name = "__floatsikf", .linkage = linkage });
- @export(__floatdikf, .{ .name = "__floatdikf", .linkage = linkage });
- @export(__floatundikf, .{ .name = "__floatundikf", .linkage = linkage });
- @export(__floatunsikf, .{ .name = "__floatunsikf", .linkage = linkage });
- @export(__floatuntikf, .{ .name = "__floatuntikf", .linkage = linkage });
- }
- }
-}
-
-pub fn floatXiYf(comptime T: type, x: anytype) T {
- @setRuntimeSafety(is_test);
-
- if (x == 0) return 0;
-
- // Various constants whose values follow from the type parameters.
- // Any reasonable optimizer will fold and propagate all of these.
- const Z = std.meta.Int(.unsigned, @bitSizeOf(@TypeOf(x)));
- const uT = std.meta.Int(.unsigned, @bitSizeOf(T));
- const inf = math.inf(T);
- const float_bits = @bitSizeOf(T);
- const int_bits = @bitSizeOf(@TypeOf(x));
- const exp_bits = math.floatExponentBits(T);
- const fractional_bits = math.floatFractionalBits(T);
- const exp_bias = math.maxInt(std.meta.Int(.unsigned, exp_bits - 1));
- const implicit_bit = if (T != f80) @as(uT, 1) << fractional_bits else 0;
- const max_exp = exp_bias;
-
- // Sign
- var abs_val = math.absCast(x);
- const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
- var result: uT = sign_bit;
-
- // Compute significand
- var exp = int_bits - @clz(Z, abs_val) - 1;
- if (int_bits <= fractional_bits or exp <= fractional_bits) {
- const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
-
- // Shift up result to line up with the significand - no rounding required
- result = (@intCast(uT, abs_val) << shift_amt);
- result ^= implicit_bit; // Remove implicit integer bit
- } else {
- var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
- const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
-
- // Shift down result and remove implicit integer bit
- result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
-
- // Round result, including round-to-even for exact ties
- result = ((result + 1) >> 1) & ~@as(uT, @boolToInt(exact_tie));
- }
-
- // Compute exponent
- if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
- return @bitCast(T, sign_bit | @bitCast(uT, inf));
-
- result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
-
- // If the result included a carry, we need to restore the explicit integer bit
- if (T == f80) result |= 1 << fractional_bits;
-
- return @bitCast(T, sign_bit | result);
-}
-
-// Conversion to f16
-pub fn __floatsihf(a: i32) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatunsihf(a: u32) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatdihf(a: i64) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatundihf(a: u64) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floattihf(a: i128) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-pub fn __floatuntihf(a: u128) callconv(.C) f16 {
- return floatXiYf(f16, a);
-}
-
-// Conversion to f32
-pub fn __floatsisf(a: i32) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatunsisf(a: u32) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatdisf(a: i64) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatundisf(a: u64) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floattisf(a: i128) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-pub fn __floatuntisf(a: u128) callconv(.C) f32 {
- return floatXiYf(f32, a);
-}
-
-// Conversion to f64
-pub fn __floatsidf(a: i32) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatunsidf(a: u32) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatdidf(a: i64) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatundidf(a: u64) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floattidf(a: i128) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-pub fn __floatuntidf(a: u128) callconv(.C) f64 {
- return floatXiYf(f64, a);
-}
-
-// Conversion to f80
-pub fn __floatsixf(a: i32) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatunsixf(a: u32) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatdixf(a: i64) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatundixf(a: u64) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floattixf(a: i128) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-pub fn __floatuntixf(a: u128) callconv(.C) f80 {
- return floatXiYf(f80, a);
-}
-
-// Conversion to f128
-pub fn __floatsitf(a: i32) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatsikf(a: i32) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __floatsitf, .{a});
-}
-
-pub fn __floatunsitf(a: u32) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatunsikf(a: u32) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __floatunsitf, .{a});
-}
-
-pub fn __floatditf(a: i64) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatdikf(a: i64) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __floatditf, .{a});
-}
-
-pub fn __floatunditf(a: u64) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatundikf(a: u64) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __floatunditf, .{a});
-}
-
-pub fn __floattitf(a: i128) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatuntitf(a: u128) callconv(.C) f128 {
- return floatXiYf(f128, a);
-}
-
-pub fn __floatuntikf(a: u128) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __floatuntitf, .{a});
-}
-
-// Conversion to f32
-pub fn __aeabi_ui2f(arg: u32) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-pub fn __aeabi_i2f(arg: i32) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-pub fn __aeabi_ul2f(arg: u64) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-pub fn __aeabi_l2f(arg: i64) callconv(.AAPCS) f32 {
- return floatXiYf(f32, arg);
-}
-
-// Conversion to f64
-pub fn __aeabi_ui2d(arg: u32) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-pub fn __aeabi_i2d(arg: i32) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-pub fn __aeabi_ul2d(arg: u64) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-pub fn __aeabi_l2d(arg: i64) callconv(.AAPCS) f64 {
- return floatXiYf(f64, arg);
-}
-
-test {
- _ = @import("floatXiYf_test.zig");
-}
lib/compiler_rt/gedf2.zig
@@ -0,0 +1,36 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpge, .{ .name = "__aeabi_dcmpge", .linkage = common.linkage });
+ @export(__aeabi_dcmpgt, .{ .name = "__aeabi_dcmpgt", .linkage = common.linkage });
+ } else {
+ @export(__gedf2, .{ .name = "__gedf2", .linkage = common.linkage });
+ @export(__gtdf2, .{ .name = "__gtdf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+fn __gedf2(a: f64, b: f64) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f64, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+fn __gtdf2(a: f64, b: f64) callconv(.C) i32 {
+ return __gedf2(a, b);
+}
+
+fn __aeabi_dcmpge(a: f64, b: f64) callconv(.AAPCS) i32 {
+    return @boolToInt(comparef.cmpf2(f64, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_dcmpgt(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return @boolToInt(comparef.cmpf2(f64, comparef.GE, a, b) == .Greater);
+}
lib/compiler_rt/gesf2.zig
@@ -0,0 +1,36 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpge, .{ .name = "__aeabi_fcmpge", .linkage = common.linkage });
+ @export(__aeabi_fcmpgt, .{ .name = "__aeabi_fcmpgt", .linkage = common.linkage });
+ } else {
+ @export(__gesf2, .{ .name = "__gesf2", .linkage = common.linkage });
+ @export(__gtsf2, .{ .name = "__gtsf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+fn __gesf2(a: f32, b: f32) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f32, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+fn __gtsf2(a: f32, b: f32) callconv(.C) i32 {
+ return __gesf2(a, b);
+}
+
+fn __aeabi_fcmpge(a: f32, b: f32) callconv(.AAPCS) i32 {
+    return @boolToInt(comparef.cmpf2(f32, comparef.GE, a, b) != .Less);
+}
+
+fn __aeabi_fcmpgt(a: f32, b: f32) callconv(.AAPCS) i32 {
+    return @boolToInt(comparef.cmpf2(f32, comparef.GE, a, b) == .Greater);
+}
lib/compiler_rt/getf2.zig
@@ -0,0 +1,36 @@
+//! The quoted behavior definitions are from
+//! https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gccint/Soft-float-library-routines.html#Soft-float-library-routines
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__gekf2, .{ .name = "__gekf2", .linkage = common.linkage });
+ @export(__gtkf2, .{ .name = "__gtkf2", .linkage = common.linkage });
+ } else {
+ @export(__getf2, .{ .name = "__getf2", .linkage = common.linkage });
+ @export(__gttf2, .{ .name = "__gttf2", .linkage = common.linkage });
+ }
+}
+
+/// "These functions return a value greater than or equal to zero if neither
+/// argument is NaN, and a is greater than or equal to b."
+fn __getf2(a: f128, b: f128) callconv(.C) i32 {
+ return @enumToInt(comparef.cmpf2(f128, comparef.GE, a, b));
+}
+
+/// "These functions return a value greater than zero if neither argument is NaN,
+/// and a is strictly greater than b."
+fn __gttf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
+
+fn __gekf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
+
+fn __gtkf2(a: f128, b: f128) callconv(.C) i32 {
+ return __getf2(a, b);
+}
lib/compiler_rt/gexf2.zig
@@ -0,0 +1,17 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__gexf2, .{ .name = "__gexf2", .linkage = common.linkage });
+ @export(__gtxf2, .{ .name = "__gtxf2", .linkage = common.linkage });
+}
+
+fn __gexf2(a: f80, b: f80) callconv(.C) i32 {
+ return @enumToInt(comparef.cmp_f80(comparef.GE, a, b));
+}
+
+fn __gtxf2(a: f80, b: f80) callconv(.C) i32 {
+ return __gexf2(a, b);
+}
lib/compiler_rt/int_to_float.zig
@@ -0,0 +1,58 @@
+const Int = @import("std").meta.Int;
+const math = @import("std").math;
+
+pub fn intToFloat(comptime T: type, x: anytype) T {
+ if (x == 0) return 0;
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const Z = Int(.unsigned, @bitSizeOf(@TypeOf(x)));
+ const uT = Int(.unsigned, @bitSizeOf(T));
+ const inf = math.inf(T);
+ const float_bits = @bitSizeOf(T);
+ const int_bits = @bitSizeOf(@TypeOf(x));
+ const exp_bits = math.floatExponentBits(T);
+ const fractional_bits = math.floatFractionalBits(T);
+ const exp_bias = math.maxInt(Int(.unsigned, exp_bits - 1));
+ const implicit_bit = if (T != f80) @as(uT, 1) << fractional_bits else 0;
+ const max_exp = exp_bias;
+
+ // Sign
+ var abs_val = math.absCast(x);
+ const sign_bit = if (x < 0) @as(uT, 1) << (float_bits - 1) else 0;
+ var result: uT = sign_bit;
+
+ // Compute significand
+ var exp = int_bits - @clz(Z, abs_val) - 1;
+ if (int_bits <= fractional_bits or exp <= fractional_bits) {
+ const shift_amt = fractional_bits - @intCast(math.Log2Int(uT), exp);
+
+ // Shift up result to line up with the significand - no rounding required
+ result = (@intCast(uT, abs_val) << shift_amt);
+ result ^= implicit_bit; // Remove implicit integer bit
+ } else {
+ var shift_amt = @intCast(math.Log2Int(Z), exp - fractional_bits);
+ const exact_tie: bool = @ctz(Z, abs_val) == shift_amt - 1;
+
+ // Shift down result and remove implicit integer bit
+ result = @intCast(uT, (abs_val >> (shift_amt - 1))) ^ (implicit_bit << 1);
+
+ // Round result, including round-to-even for exact ties
+ result = ((result + 1) >> 1) & ~@as(uT, @boolToInt(exact_tie));
+ }
+
+ // Compute exponent
+ if ((int_bits > max_exp) and (exp > max_exp)) // If exponent too large, overflow to infinity
+ return @bitCast(T, sign_bit | @bitCast(uT, inf));
+
+ result += (@as(uT, exp) + exp_bias) << math.floatMantissaBits(T);
+
+ // If the result included a carry, we need to restore the explicit integer bit
+ if (T == f80) result |= 1 << fractional_bits;
+
+ return @bitCast(T, sign_bit | result);
+}
+
+test {
+ _ = @import("floatXiYf_test.zig");
+}
lib/compiler_rt/muldf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = common.linkage });
+ } else {
+ @export(__muldf3, .{ .name = "__muldf3", .linkage = common.linkage });
+ }
+}
+
+fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
+ return mulf3(f64, a, b);
+}
+
+fn __aeabi_dmul(a: f64, b: f64) callconv(.AAPCS) f64 {
+ return mulf3(f64, a, b);
+}
lib/compiler_rt/mulXf3.zig → lib/compiler_rt/mulf3.zig
@@ -1,60 +1,11 @@
-// Ported from:
-//
-// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
-
const std = @import("std");
const math = std.math;
const builtin = @import("builtin");
-const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__mulsf3, .{ .name = "__mulsf3", .linkage = linkage });
- @export(__muldf3, .{ .name = "__muldf3", .linkage = linkage });
- @export(__mulxf3, .{ .name = "__mulxf3", .linkage = linkage });
- @export(__multf3, .{ .name = "__multf3", .linkage = linkage });
-
- if (!is_test) {
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = linkage });
- @export(__aeabi_dmul, .{ .name = "__aeabi_dmul", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__mulkf3, .{ .name = "__mulkf3", .linkage = linkage });
- }
- }
-}
-
-pub fn __mulkf3(a: f128, b: f128) callconv(.C) f128 {
- return @call(.{ .modifier = .always_inline }, __multf3, .{ a, b });
-}
-pub fn __multf3(a: f128, b: f128) callconv(.C) f128 {
- return mulXf3(f128, a, b);
-}
-pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
- return mulXf3(f80, a, b);
-}
-pub fn __muldf3(a: f64, b: f64) callconv(.C) f64 {
- return mulXf3(f64, a, b);
-}
-pub fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
- return mulXf3(f32, a, b);
-}
-
-pub fn __aeabi_fmul(a: f32, b: f32) callconv(.C) f32 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __mulsf3, .{ a, b });
-}
-
-pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
- @setRuntimeSafety(false);
- return @call(.{ .modifier = .always_inline }, __muldf3, .{ a, b });
-}
+const common = @import("./common.zig");
-pub fn mulXf3(comptime T: type, a: T, b: T) T {
+/// Ported from:
+/// https://github.com/llvm/llvm-project/blob/2ffb1b0413efa9a24eb3c49e710e36f92e2cb50b/compiler-rt/lib/builtins/fp_mul_impl.inc
+pub inline fn mulf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(T).Float.bits;
const significandBits = math.floatMantissaBits(T);
@@ -145,7 +96,7 @@ pub fn mulXf3(comptime T: type, a: T, b: T) T {
var productHi: ZSignificand = undefined;
var productLo: ZSignificand = undefined;
const left_align_shift = ZSignificandBits - fractionalBits - 1;
- wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
+ common.wideMultiply(ZSignificand, aSignificand, bSignificand << left_align_shift, &productHi, &productLo);
var productExponent: i32 = @intCast(i32, aExponent + bExponent) - exponentBias + scale;
@@ -207,141 +158,9 @@ pub fn mulXf3(comptime T: type, a: T, b: T) T {
return @bitCast(T, result);
}
-fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
- @setRuntimeSafety(builtin.is_test);
- switch (Z) {
- u16 => {
- // 16x16 --> 32 bit multiply
- const product = @as(u32, a) * @as(u32, b);
- hi.* = @intCast(u16, product >> 16);
- lo.* = @truncate(u16, product);
- },
- u32 => {
- // 32x32 --> 64 bit multiply
- const product = @as(u64, a) * @as(u64, b);
- hi.* = @intCast(u32, product >> 32);
- lo.* = @truncate(u32, product);
- },
- u64 => {
- const S = struct {
- fn loWord(x: u64) u64 {
- return @truncate(u32, x);
- }
- fn hiWord(x: u64) u64 {
- return @intCast(u32, x >> 32);
- }
- };
- // 64x64 -> 128 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
- // Each of the component 32x32 -> 64 products
- const plolo: u64 = S.loWord(a) * S.loWord(b);
- const plohi: u64 = S.loWord(a) * S.hiWord(b);
- const philo: u64 = S.hiWord(a) * S.loWord(b);
- const phihi: u64 = S.hiWord(a) * S.hiWord(b);
- // Sum terms that contribute to lo in a way that allows us to get the carry
- const r0: u64 = S.loWord(plolo);
- const r1: u64 = S.hiWord(plolo) +% S.loWord(plohi) +% S.loWord(philo);
- lo.* = r0 +% (r1 << 32);
- // Sum terms contributing to hi with the carry from lo
- hi.* = S.hiWord(plohi) +% S.hiWord(philo) +% S.hiWord(r1) +% phihi;
- },
- u128 => {
- const Word_LoMask = @as(u64, 0x00000000ffffffff);
- const Word_HiMask = @as(u64, 0xffffffff00000000);
- const Word_FullMask = @as(u64, 0xffffffffffffffff);
- const S = struct {
- fn Word_1(x: u128) u64 {
- return @truncate(u32, x >> 96);
- }
- fn Word_2(x: u128) u64 {
- return @truncate(u32, x >> 64);
- }
- fn Word_3(x: u128) u64 {
- return @truncate(u32, x >> 32);
- }
- fn Word_4(x: u128) u64 {
- return @truncate(u32, x);
- }
- };
- // 128x128 -> 256 wide multiply for platforms that don't have such an operation;
- // many 64-bit platforms have this operation, but they tend to have hardware
- // floating-point, so we don't bother with a special case for them here.
-
- const product11: u64 = S.Word_1(a) * S.Word_1(b);
- const product12: u64 = S.Word_1(a) * S.Word_2(b);
- const product13: u64 = S.Word_1(a) * S.Word_3(b);
- const product14: u64 = S.Word_1(a) * S.Word_4(b);
- const product21: u64 = S.Word_2(a) * S.Word_1(b);
- const product22: u64 = S.Word_2(a) * S.Word_2(b);
- const product23: u64 = S.Word_2(a) * S.Word_3(b);
- const product24: u64 = S.Word_2(a) * S.Word_4(b);
- const product31: u64 = S.Word_3(a) * S.Word_1(b);
- const product32: u64 = S.Word_3(a) * S.Word_2(b);
- const product33: u64 = S.Word_3(a) * S.Word_3(b);
- const product34: u64 = S.Word_3(a) * S.Word_4(b);
- const product41: u64 = S.Word_4(a) * S.Word_1(b);
- const product42: u64 = S.Word_4(a) * S.Word_2(b);
- const product43: u64 = S.Word_4(a) * S.Word_3(b);
- const product44: u64 = S.Word_4(a) * S.Word_4(b);
-
- const sum0: u128 = @as(u128, product44);
- const sum1: u128 = @as(u128, product34) +%
- @as(u128, product43);
- const sum2: u128 = @as(u128, product24) +%
- @as(u128, product33) +%
- @as(u128, product42);
- const sum3: u128 = @as(u128, product14) +%
- @as(u128, product23) +%
- @as(u128, product32) +%
- @as(u128, product41);
- const sum4: u128 = @as(u128, product13) +%
- @as(u128, product22) +%
- @as(u128, product31);
- const sum5: u128 = @as(u128, product12) +%
- @as(u128, product21);
- const sum6: u128 = @as(u128, product11);
-
- const r0: u128 = (sum0 & Word_FullMask) +%
- ((sum1 & Word_LoMask) << 32);
- const r1: u128 = (sum0 >> 64) +%
- ((sum1 >> 32) & Word_FullMask) +%
- (sum2 & Word_FullMask) +%
- ((sum3 << 32) & Word_HiMask);
-
- lo.* = r0 +% (r1 << 64);
- hi.* = (r1 >> 64) +%
- (sum1 >> 96) +%
- (sum2 >> 64) +%
- (sum3 >> 32) +%
- sum4 +%
- (sum5 << 32) +%
- (sum6 << 64);
- },
- else => @compileError("unsupported"),
- }
-}
-
-/// Returns a power-of-two integer type that is large enough to contain
-/// the significand of T, including an explicit integer bit
-fn PowerOfTwoSignificandZ(comptime T: type) type {
- const bits = math.ceilPowerOfTwoAssert(u16, math.floatFractionalBits(T) + 1);
- return std.meta.Int(.unsigned, bits);
-}
-
-fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
- @setRuntimeSafety(builtin.is_test);
- const Z = PowerOfTwoSignificandZ(T);
- const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
-
- const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
- significand.* <<= @intCast(math.Log2Int(Z), shift);
- return @as(i32, 1) - shift;
-}
-
-// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
-//
-// This is analogous to an shr version of `@shlWithOverflow`
+/// Returns `true` if the right shift is inexact (i.e. any bit shifted out is non-zero)
+///
+/// This is analogous to an shr version of `@shlWithOverflow`
fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
@setRuntimeSafety(builtin.is_test);
const typeWidth = @typeInfo(Z).Int.bits;
@@ -363,6 +182,22 @@ fn wideShrWithTruncation(comptime Z: type, hi: *Z, lo: *Z, count: u32) bool {
return inexact;
}
+fn normalize(comptime T: type, significand: *PowerOfTwoSignificandZ(T)) i32 {
+ const Z = PowerOfTwoSignificandZ(T);
+ const integerBit = @as(Z, 1) << math.floatFractionalBits(T);
+
+ const shift = @clz(Z, significand.*) - @clz(Z, integerBit);
+ significand.* <<= @intCast(math.Log2Int(Z), shift);
+ return @as(i32, 1) - shift;
+}
+
+/// Returns a power-of-two integer type that is large enough to contain
+/// the significand of T, including an explicit integer bit
+fn PowerOfTwoSignificandZ(comptime T: type) type {
+ const bits = math.ceilPowerOfTwoAssert(u16, math.floatFractionalBits(T) + 1);
+ return std.meta.Int(.unsigned, bits);
+}
+
test {
- _ = @import("mulXf3_test.zig");
+ _ = @import("mulf3_test.zig");
}
lib/compiler_rt/mulXf3_test.zig → lib/compiler_rt/mulf3_test.zig
@@ -7,10 +7,10 @@ const math = std.math;
const qnan128 = @bitCast(f128, @as(u128, 0x7fff800000000000) << 64);
const inf128 = @bitCast(f128, @as(u128, 0x7fff000000000000) << 64);
-const __multf3 = @import("mulXf3.zig").__multf3;
-const __mulxf3 = @import("mulXf3.zig").__mulxf3;
-const __muldf3 = @import("mulXf3.zig").__muldf3;
-const __mulsf3 = @import("mulXf3.zig").__mulsf3;
+const __multf3 = @import("multf3.zig").__multf3;
+const __mulxf3 = @import("mulxf3.zig").__mulxf3;
+const __muldf3 = @import("muldf3.zig").__muldf3;
+const __mulsf3 = @import("mulsf3.zig").__mulsf3;
// return true if equal
// use two 64-bit integers intead of one 128-bit integer
lib/compiler_rt/mulsf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fmul, .{ .name = "__aeabi_fmul", .linkage = common.linkage });
+ } else {
+ @export(__mulsf3, .{ .name = "__mulsf3", .linkage = common.linkage });
+ }
+}
+
+fn __mulsf3(a: f32, b: f32) callconv(.C) f32 {
+ return mulf3(f32, a, b);
+}
+
+fn __aeabi_fmul(a: f32, b: f32) callconv(.C) f32 {
+ return mulf3(f32, a, b);
+}
lib/compiler_rt/multf3.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__mulkf3, .{ .name = "__mulkf3", .linkage = common.linkage });
+ } else {
+ @export(__multf3, .{ .name = "__multf3", .linkage = common.linkage });
+ }
+}
+
+fn __multf3(a: f128, b: f128) callconv(.C) f128 {
+ return mulf3(f128, a, b);
+}
+
+fn __mulkf3(a: f128, b: f128) callconv(.C) f128 {
+ return mulf3(f128, a, b);
+}
lib/compiler_rt/mulxf3.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const mulf3 = @import("./mulf3.zig").mulf3;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__mulxf3, .{ .name = "__mulxf3", .linkage = common.linkage });
+}
+
+pub fn __mulxf3(a: f80, b: f80) callconv(.C) f80 {
+ return mulf3(f80, a, b);
+}
lib/compiler_rt/sparc.zig
@@ -50,7 +50,7 @@ const FCMP = enum(i32) {
// Basic arithmetic
pub fn _Qp_add(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("addXf3.zig").__addtf3(a.*, b.*);
+ c.* = @import("addf3.zig").__addtf3(a.*, b.*);
}
pub fn _Qp_div(c: *f128, a: *f128, b: *f128) callconv(.C) void {
@@ -58,11 +58,11 @@ pub fn _Qp_div(c: *f128, a: *f128, b: *f128) callconv(.C) void {
}
pub fn _Qp_mul(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("mulXf3.zig").__multf3(a.*, b.*);
+ c.* = @import("mulf3.zig").__multf3(a.*, b.*);
}
pub fn _Qp_sub(c: *f128, a: *f128, b: *f128) callconv(.C) void {
- c.* = @import("addXf3.zig").__subtf3(a.*, b.*);
+ c.* = @import("addf3.zig").__subtf3(a.*, b.*);
}
// Comparison
lib/compiler_rt/subdf3.zig
@@ -0,0 +1,21 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dsub, .{ .name = "__aeabi_dsub", .linkage = common.linkage });
+ } else {
+ @export(__subdf3, .{ .name = "__subdf3", .linkage = common.linkage });
+ }
+}
+
+fn __subdf3(a: f64, b: f64) callconv(.C) f64 {
+ const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+ return a + neg_b;
+}
+
+fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
+ const neg_b = @bitCast(f64, @bitCast(u64, b) ^ (@as(u64, 1) << 63));
+ return a + neg_b;
+}
lib/compiler_rt/subsf3.zig
@@ -0,0 +1,21 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fsub, .{ .name = "__aeabi_fsub", .linkage = common.linkage });
+ } else {
+ @export(__subsf3, .{ .name = "__subsf3", .linkage = common.linkage });
+ }
+}
+
+fn __subsf3(a: f32, b: f32) callconv(.C) f32 {
+ const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
+ return a + neg_b;
+}
+
+fn __aeabi_fsub(a: f32, b: f32) callconv(.AAPCS) f32 {
+ const neg_b = @bitCast(f32, @bitCast(u32, b) ^ (@as(u32, 1) << 31));
+ return a + neg_b;
+}
lib/compiler_rt/subtf3.zig
@@ -0,0 +1,21 @@
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__subkf3, .{ .name = "__subkf3", .linkage = common.linkage });
+ } else {
+ @export(__subtf3, .{ .name = "__subtf3", .linkage = common.linkage });
+ }
+}
+
+fn __subtf3(a: f128, b: f128) callconv(.C) f128 {
+ const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
+ return a + neg_b;
+}
+
+fn __subkf3(a: f128, b: f128) callconv(.C) f128 {
+ const neg_b = @bitCast(f128, @bitCast(u128, b) ^ (@as(u128, 1) << 127));
+ return a + neg_b;
+}
lib/compiler_rt/subxf3.zig
@@ -0,0 +1,15 @@
+const std = @import("std");
+const common = @import("./common.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__subxf3, .{ .name = "__subxf3", .linkage = common.linkage });
+}
+
+fn __subxf3(a: f80, b: f80) callconv(.C) f80 {
+ var b_rep = std.math.break_f80(b);
+ b_rep.exp ^= 0x8000;
+ const neg_b = std.math.make_f80(b_rep);
+ return a + neg_b;
+}
lib/compiler_rt/trunc_f80.zig
@@ -1,183 +0,0 @@
-const std = @import("std");
-const builtin = @import("builtin");
-const arch = builtin.cpu.arch;
-const testing = std.testing;
-const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = linkage });
- @export(__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = linkage });
- @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = linkage });
- @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = linkage });
-}
-
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-const F16T = if (arch.isAARCH64()) f16 else u16;
-
-pub fn __truncxfhf2(a: f80) callconv(.C) F16T {
- return @bitCast(F16T, trunc(f16, a));
-}
-
-pub fn __truncxfsf2(a: f80) callconv(.C) f32 {
- return trunc(f32, a);
-}
-
-pub fn __truncxfdf2(a: f80) callconv(.C) f64 {
- return trunc(f64, a);
-}
-
-inline fn trunc(comptime dst_t: type, a: f80) dst_t {
- @setRuntimeSafety(builtin.is_test);
-
- const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
- const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
- const dst_sig_bits = std.math.floatMantissaBits(dst_t);
-
- const src_exp_bias = 16383;
-
- const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
- const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
-
- const dst_bits = @typeInfo(dst_t).Float.bits;
- const dst_exp_bits = dst_bits - dst_sig_bits - 1;
- const dst_inf_exp = (1 << dst_exp_bits) - 1;
- const dst_exp_bias = dst_inf_exp >> 1;
-
- const underflow = src_exp_bias + 1 - dst_exp_bias;
- const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
-
- const dst_qnan = 1 << (dst_sig_bits - 1);
- const dst_nan_mask = dst_qnan - 1;
-
- // Break a into a sign and representation of the absolute value
- var a_rep = std.math.break_f80(a);
- const sign = a_rep.exp & 0x8000;
- a_rep.exp &= 0x7FFF;
- a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
- var abs_result: dst_rep_t = undefined;
-
- if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
- // The exponent of a is within the range of normal numbers in the
- // destination format. We can convert by simply right-shifting with
- // rounding and adjusting the exponent.
- abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
- abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits));
- abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
-
- const round_bits = a_rep.fraction & round_mask;
- if (round_bits > halfway) {
- // Round to nearest
- abs_result += 1;
- } else if (round_bits == halfway) {
- // Ties to even
- abs_result += abs_result & 1;
- }
- } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
- // a is NaN.
- // Conjure the result by beginning with infinity, setting the qNaN
- // bit and inserting the (truncated) trailing NaN field.
- abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
- abs_result |= dst_qnan;
- abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
- } else if (a_rep.exp >= overflow) {
- // a overflows to infinity.
- abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
- } else {
- // a underflows on conversion to the destination type or is an exact
- // zero. The result may be a denormal or zero. Extract the exponent
- // to get the shift amount for the denormalization.
- const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
-
- // Right shift by the denormalization amount with sticky.
- if (shift > src_sig_bits) {
- abs_result = 0;
- } else {
- const sticky = @boolToInt(a_rep.fraction << @intCast(u6, shift) != 0);
- const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky;
- abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits));
- const round_bits = denormalized_significand & round_mask;
- if (round_bits > halfway) {
- // Round to nearest
- abs_result += 1;
- } else if (round_bits == halfway) {
- // Ties to even
- abs_result += abs_result & 1;
- }
- }
- }
-
- const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
- return @bitCast(dst_t, result);
-}
-
-pub fn __trunctfxf2(a: f128) callconv(.C) f80 {
- const src_sig_bits = std.math.floatMantissaBits(f128);
- const dst_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
-
- // Various constants whose values follow from the type parameters.
- // Any reasonable optimizer will fold and propagate all of these.
- const src_bits = @typeInfo(f128).Float.bits;
- const src_exp_bits = src_bits - src_sig_bits - 1;
- const src_inf_exp = 0x7FFF;
-
- const src_inf = src_inf_exp << src_sig_bits;
- const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
- const src_abs_mask = src_sign_mask - 1;
- const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
- const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
-
- // Break a into a sign and representation of the absolute value
- const a_rep = @bitCast(u128, a);
- const a_abs = a_rep & src_abs_mask;
- const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
- const integer_bit = 1 << 63;
-
- var res: std.math.F80 = undefined;
-
- if (a_abs > src_inf) {
- // a is NaN.
- // Conjure the result by beginning with infinity, setting the qNaN
- // bit and inserting the (truncated) trailing NaN field.
- res.exp = 0x7fff;
- res.fraction = 0x8000000000000000;
- res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits));
- } else {
- // The exponent of a is within the range of normal numbers in the
- // destination format. We can convert by simply right-shifting with
- // rounding, adding the explicit integer bit, and adjusting the exponent
- res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit;
- res.exp = @truncate(u16, a_abs >> src_sig_bits);
-
- const round_bits = a_abs & round_mask;
- if (round_bits > halfway) {
- // Round to nearest
- const carry = @boolToInt(@addWithOverflow(u64, res.fraction, 1, &res.fraction));
- res.exp += carry;
- res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
- } else if (round_bits == halfway) {
- // Ties to even
- const carry = @boolToInt(@addWithOverflow(u64, res.fraction, res.fraction & 1, &res.fraction));
- res.exp += carry;
- res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
- }
- if (res.exp == 0) res.fraction &= ~@as(u64, integer_bit); // Remove integer bit for de-normals
- }
-
- res.exp |= sign;
- return std.math.make_f80(res);
-}
-
-fn test__trunctfxf2(a: f128, expected: f80) !void {
- const x = __trunctfxf2(a);
- try testing.expect(x == expected);
-}
-
-test {
- try test__trunctfxf2(1.5, 1.5);
- try test__trunctfxf2(2.5, 2.5);
- try test__trunctfxf2(-2.5, -2.5);
- try test__trunctfxf2(0.0, 0.0);
-}
lib/compiler_rt/truncdfhf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = common.linkage });
+ } else {
+ @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = common.linkage });
+ }
+}
+
+fn __truncdfhf2(a: f64) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f64, a));
+}
+
+fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
+ return @bitCast(common.F16T, truncf(f16, f64, a));
+}
lib/compiler_rt/truncdfsf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = common.linkage });
+ } else {
+ @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = common.linkage });
+ }
+}
+
+fn __truncdfsf2(a: f64) callconv(.C) f32 {
+ return truncf(f32, f64, a);
+}
+
+fn __aeabi_d2f(a: f64) callconv(.AAPCS) f32 {
+ return truncf(f32, f64, a);
+}
lib/compiler_rt/truncXfYf2.zig → lib/compiler_rt/truncf.zig
@@ -1,93 +1,6 @@
const std = @import("std");
-const builtin = @import("builtin");
-const arch = builtin.cpu.arch;
-const is_test = builtin.is_test;
-const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
-pub const panic = @import("common.zig").panic;
-
-comptime {
- @export(__truncdfhf2, .{ .name = "__truncdfhf2", .linkage = linkage });
- @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = linkage });
- @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = linkage });
- @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = linkage });
-
- @export(__truncdfsf2, .{ .name = "__truncdfsf2", .linkage = linkage });
- @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = linkage });
-
- if (!is_test) {
- @export(__gnu_f2h_ieee, .{ .name = "__gnu_f2h_ieee", .linkage = linkage });
-
- if (arch.isARM() or arch.isThumb()) {
- @export(__aeabi_d2h, .{ .name = "__aeabi_d2h", .linkage = linkage });
- @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = linkage });
- @export(__aeabi_d2f, .{ .name = "__aeabi_d2f", .linkage = linkage });
- }
-
- if (arch.isPPC() or arch.isPPC64()) {
- @export(__trunckfsf2, .{ .name = "__trunckfsf2", .linkage = linkage });
- @export(__trunckfdf2, .{ .name = "__trunckfdf2", .linkage = linkage });
- }
- }
-}
-// AArch64 is the only ABI (at the moment) to support f16 arguments without the
-// need for extending them to wider fp types.
-// TODO remove this; do this type selection in the language rather than
-// here in compiler-rt.
-const F16T = if (arch.isAARCH64()) f16 else u16;
-
-pub fn __truncsfhf2(a: f32) callconv(.C) F16T {
- return @bitCast(F16T, truncXfYf2(f16, f32, a));
-}
-
-pub fn __gnu_f2h_ieee(a: f32) callconv(.C) F16T {
- return @call(.{ .modifier = .always_inline }, __truncsfhf2, .{a});
-}
-
-pub fn __truncdfhf2(a: f64) callconv(.C) F16T {
- return @bitCast(F16T, truncXfYf2(f16, f64, a));
-}
-
-pub fn __trunctfhf2(a: f128) callconv(.C) F16T {
- return @bitCast(F16T, truncXfYf2(f16, f128, a));
-}
-
-pub fn __trunctfsf2(a: f128) callconv(.C) f32 {
- return truncXfYf2(f32, f128, a);
-}
-
-pub fn __trunckfsf2(a: f128) callconv(.C) f32 {
- return truncXfYf2(f32, f128, a);
-}
-
-pub fn __trunctfdf2(a: f128) callconv(.C) f64 {
- return truncXfYf2(f64, f128, a);
-}
-
-pub fn __trunckfdf2(a: f128) callconv(.C) f64 {
- return truncXfYf2(f64, f128, a);
-}
-
-pub fn __truncdfsf2(a: f64) callconv(.C) f32 {
- return truncXfYf2(f32, f64, a);
-}
-
-pub fn __aeabi_d2f(a: f64) callconv(.AAPCS) f32 {
- @setRuntimeSafety(false);
- return truncXfYf2(f32, f64, a);
-}
-
-pub fn __aeabi_d2h(a: f64) callconv(.AAPCS) u16 {
- @setRuntimeSafety(false);
- return @bitCast(F16T, truncXfYf2(f16, f64, a));
-}
-
-pub fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
- @setRuntimeSafety(false);
- return @bitCast(F16T, truncXfYf2(f16, f32, a));
-}
-
-inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
+pub inline fn truncf(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const src_rep_t = std.meta.Int(.unsigned, @typeInfo(src_t).Float.bits);
const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
const srcSigBits = std.math.floatMantissaBits(src_t);
@@ -187,6 +100,88 @@ inline fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t
return @bitCast(dst_t, result);
}
+pub inline fn trunc_f80(comptime dst_t: type, a: f80) dst_t {
+ const dst_rep_t = std.meta.Int(.unsigned, @typeInfo(dst_t).Float.bits);
+ const src_sig_bits = std.math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+ const dst_sig_bits = std.math.floatMantissaBits(dst_t);
+
+ const src_exp_bias = 16383;
+
+ const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
+ const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
+
+ const dst_bits = @typeInfo(dst_t).Float.bits;
+ const dst_exp_bits = dst_bits - dst_sig_bits - 1;
+ const dst_inf_exp = (1 << dst_exp_bits) - 1;
+ const dst_exp_bias = dst_inf_exp >> 1;
+
+ const underflow = src_exp_bias + 1 - dst_exp_bias;
+ const overflow = src_exp_bias + dst_inf_exp - dst_exp_bias;
+
+ const dst_qnan = 1 << (dst_sig_bits - 1);
+ const dst_nan_mask = dst_qnan - 1;
+
+ // Break a into a sign and representation of the absolute value
+ var a_rep = std.math.break_f80(a);
+ const sign = a_rep.exp & 0x8000;
+ a_rep.exp &= 0x7FFF;
+ a_rep.fraction &= 0x7FFFFFFFFFFFFFFF;
+ var abs_result: dst_rep_t = undefined;
+
+ if (a_rep.exp -% underflow < a_rep.exp -% overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ abs_result = @as(dst_rep_t, a_rep.exp) << dst_sig_bits;
+ abs_result |= @truncate(dst_rep_t, a_rep.fraction >> (src_sig_bits - dst_sig_bits));
+ abs_result -%= @as(dst_rep_t, src_exp_bias - dst_exp_bias) << dst_sig_bits;
+
+ const round_bits = a_rep.fraction & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ abs_result += 1;
+ } else if (round_bits == halfway) {
+ // Ties to even
+ abs_result += abs_result & 1;
+ }
+ } else if (a_rep.exp == 0x7FFF and a_rep.fraction != 0) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+ abs_result |= dst_qnan;
+ abs_result |= @intCast(dst_rep_t, (a_rep.fraction >> (src_sig_bits - dst_sig_bits)) & dst_nan_mask);
+ } else if (a_rep.exp >= overflow) {
+ // a overflows to infinity.
+ abs_result = @intCast(dst_rep_t, dst_inf_exp) << dst_sig_bits;
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ const shift = src_exp_bias - dst_exp_bias - a_rep.exp;
+
+ // Right shift by the denormalization amount with sticky.
+ if (shift > src_sig_bits) {
+ abs_result = 0;
+ } else {
+ const sticky = @boolToInt(a_rep.fraction << @intCast(u6, shift) != 0);
+ const denormalized_significand = a_rep.fraction >> @intCast(u6, shift) | sticky;
+ abs_result = @intCast(dst_rep_t, denormalized_significand >> (src_sig_bits - dst_sig_bits));
+ const round_bits = denormalized_significand & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ abs_result += 1;
+ } else if (round_bits == halfway) {
+ // Ties to even
+ abs_result += abs_result & 1;
+ }
+ }
+ }
+
+ const result align(@alignOf(dst_t)) = abs_result | @as(dst_rep_t, sign) << dst_bits - 16;
+ return @bitCast(dst_t, result);
+}
+
test {
- _ = @import("truncXfYf2_test.zig");
+ _ = @import("truncf_test.zig");
}
lib/compiler_rt/truncXfYf2_test.zig → lib/compiler_rt/truncf_test.zig
@@ -1,5 +1,7 @@
const std = @import("std");
+const testing = std.testing;
const __truncsfhf2 = @import("truncXfYf2.zig").__truncsfhf2;
+const __trunctfxf2 = @import("trunctfxf2.zig").__trunctfxf2;
fn test__truncsfhf2(a: u32, expected: u16) !void {
const actual = @bitCast(u16, __truncsfhf2(@bitCast(f32, a)));
@@ -294,3 +296,15 @@ test "trunctfhf2" {
test__trunctfhf2(0x1.234eebb5faa678f4488693abcdefp+453, 0x7c00);
test__trunctfhf2(0x1.edcba9bb8c76a5a43dd21f334634p-43, 0x0);
}
+
+test "trunctfxf2" {
+ try test__trunctfxf2(1.5, 1.5);
+ try test__trunctfxf2(2.5, 2.5);
+ try test__trunctfxf2(-2.5, -2.5);
+ try test__trunctfxf2(0.0, 0.0);
+}
+
+fn test__trunctfxf2(a: f128, expected: f80) !void {
+ const x = __trunctfxf2(a);
+ try testing.expect(x == expected);
+}
lib/compiler_rt/truncsfhf2.zig
@@ -0,0 +1,26 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_gnu_abi) {
+ @export(__gnu_f2h_ieee, .{ .name = "__gnu_f2h_ieee", .linkage = common.linkage });
+ } else if (common.want_aeabi) {
+ @export(__aeabi_f2h, .{ .name = "__aeabi_f2h", .linkage = common.linkage });
+ } else {
+ @export(__truncsfhf2, .{ .name = "__truncsfhf2", .linkage = common.linkage });
+ }
+}
+
+fn __truncsfhf2(a: f32) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f32, a));
+}
+
+fn __gnu_f2h_ieee(a: f32) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f32, a));
+}
+
+fn __aeabi_f2h(a: f32) callconv(.AAPCS) u16 {
+ return @bitCast(common.F16T, truncf(f16, f32, a));
+}
lib/compiler_rt/trunctfdf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__trunckfdf2, .{ .name = "__trunckfdf2", .linkage = common.linkage });
+ } else {
+ @export(__trunctfdf2, .{ .name = "__trunctfdf2", .linkage = common.linkage });
+ }
+}
+
+fn __trunctfdf2(a: f128) callconv(.C) f64 {
+ return truncf(f64, f128, a);
+}
+
+fn __trunckfdf2(a: f128) callconv(.C) f64 {
+ return truncf(f64, f128, a);
+}
lib/compiler_rt/trunctfhf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__trunctfhf2, .{ .name = "__trunctfhf2", .linkage = common.linkage });
+}
+
+fn __trunctfhf2(a: f128) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, truncf(f16, f128, a));
+}
lib/compiler_rt/trunctfsf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const truncf = @import("./truncf.zig").truncf;
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__trunckfsf2, .{ .name = "__trunckfsf2", .linkage = common.linkage });
+ } else {
+ @export(__trunctfsf2, .{ .name = "__trunctfsf2", .linkage = common.linkage });
+ }
+}
+
+fn __trunctfsf2(a: f128) callconv(.C) f32 {
+ return truncf(f32, f128, a);
+}
+
+fn __trunckfsf2(a: f128) callconv(.C) f32 {
+ return truncf(f32, f128, a);
+}
lib/compiler_rt/trunctfxf2.zig
@@ -0,0 +1,66 @@
+const math = @import("std").math;
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__trunctfxf2, .{ .name = "__trunctfxf2", .linkage = common.linkage });
+}
+
+fn __trunctfxf2(a: f128) callconv(.C) f80 {
+ const src_sig_bits = math.floatMantissaBits(f128);
+ const dst_sig_bits = math.floatMantissaBits(f80) - 1; // -1 for the integer bit
+
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const src_bits = @typeInfo(f128).Float.bits;
+ const src_exp_bits = src_bits - src_sig_bits - 1;
+ const src_inf_exp = 0x7FFF;
+
+ const src_inf = src_inf_exp << src_sig_bits;
+ const src_sign_mask = 1 << (src_sig_bits + src_exp_bits);
+ const src_abs_mask = src_sign_mask - 1;
+ const round_mask = (1 << (src_sig_bits - dst_sig_bits)) - 1;
+ const halfway = 1 << (src_sig_bits - dst_sig_bits - 1);
+
+ // Break a into a sign and representation of the absolute value
+ const a_rep = @bitCast(u128, a);
+ const a_abs = a_rep & src_abs_mask;
+ const sign: u16 = if (a_rep & src_sign_mask != 0) 0x8000 else 0;
+ const integer_bit = 1 << 63;
+
+ var res: math.F80 = undefined;
+
+ if (a_abs > src_inf) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ res.exp = 0x7fff;
+ res.fraction = 0x8000000000000000;
+ res.fraction |= @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits));
+ } else {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding, adding the explicit integer bit, and adjusting the exponent
+ res.fraction = @truncate(u64, a_abs >> (src_sig_bits - dst_sig_bits)) | integer_bit;
+ res.exp = @truncate(u16, a_abs >> src_sig_bits);
+
+ const round_bits = a_abs & round_mask;
+ if (round_bits > halfway) {
+ // Round to nearest
+ const carry = @boolToInt(@addWithOverflow(u64, res.fraction, 1, &res.fraction));
+ res.exp += carry;
+ res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
+ } else if (round_bits == halfway) {
+ // Ties to even
+ const carry = @boolToInt(@addWithOverflow(u64, res.fraction, res.fraction & 1, &res.fraction));
+ res.exp += carry;
+ res.fraction |= @as(u64, carry) << 63; // Restore integer bit after carry
+ }
+ if (res.exp == 0) res.fraction &= ~@as(u64, integer_bit); // Remove integer bit for de-normals
+ }
+
+ res.exp |= sign;
+ return math.make_f80(res);
+}
lib/compiler_rt/truncxfdf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__truncxfdf2, .{ .name = "__truncxfdf2", .linkage = common.linkage });
+}
+
+fn __truncxfdf2(a: f80) callconv(.C) f64 {
+ return trunc_f80(f64, a);
+}
lib/compiler_rt/truncxfhf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__truncxfhf2, .{ .name = "__truncxfhf2", .linkage = common.linkage });
+}
+
+fn __truncxfhf2(a: f80) callconv(.C) common.F16T {
+ return @bitCast(common.F16T, trunc_f80(f16, a));
+}
lib/compiler_rt/truncxfsf2.zig
@@ -0,0 +1,12 @@
+const common = @import("./common.zig");
+const trunc_f80 = @import("./truncf.zig").trunc_f80;
+
+pub const panic = common.panic;
+
+comptime {
+ @export(__truncxfsf2, .{ .name = "__truncxfsf2", .linkage = common.linkage });
+}
+
+fn __truncxfsf2(a: f80) callconv(.C) f32 {
+ return trunc_f80(f32, a);
+}
lib/compiler_rt/unorddf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_dcmpun, .{ .name = "__aeabi_dcmpun", .linkage = common.linkage });
+ } else {
+ @export(__unorddf2, .{ .name = "__unorddf2", .linkage = common.linkage });
+ }
+}
+
+fn __unorddf2(a: f64, b: f64) callconv(.C) i32 {
+ return comparef.unordcmp(f64, a, b);
+}
+
+fn __aeabi_dcmpun(a: f64, b: f64) callconv(.AAPCS) i32 {
+ return comparef.unordcmp(f64, a, b);
+}
lib/compiler_rt/unordsf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_aeabi) {
+ @export(__aeabi_fcmpun, .{ .name = "__aeabi_fcmpun", .linkage = common.linkage });
+ } else {
+ @export(__unordsf2, .{ .name = "__unordsf2", .linkage = common.linkage });
+ }
+}
+
+fn __unordsf2(a: f32, b: f32) callconv(.C) i32 {
+ return comparef.unordcmp(f32, a, b);
+}
+
+fn __aeabi_fcmpun(a: f32, b: f32) callconv(.AAPCS) i32 {
+ return comparef.unordcmp(f32, a, b);
+}
lib/compiler_rt/unordtf2.zig
@@ -0,0 +1,20 @@
+const common = @import("./common.zig");
+const comparef = @import("./comparef.zig");
+
+pub const panic = common.panic;
+
+comptime {
+ if (common.want_ppc_abi) {
+ @export(__unordkf2, .{ .name = "__unordkf2", .linkage = common.linkage });
+ } else {
+ @export(__unordtf2, .{ .name = "__unordtf2", .linkage = common.linkage });
+ }
+}
+
+fn __unordtf2(a: f128, b: f128) callconv(.C) i32 {
+ return comparef.unordcmp(f128, a, b);
+}
+
+fn __unordkf2(a: f128, b: f128) callconv(.C) i32 {
+ return comparef.unordcmp(f128, a, b);
+}
lib/compiler_rt.zig
@@ -1,18 +1,59 @@
-const builtin = @import("builtin");
pub const panic = @import("compiler_rt/common.zig").panic;
comptime {
- // TODO moving these around makes or breaks compilation of zig1.o for some reason
- // Perhaps, until we switch to stage2, exports should be duplicated between this file
- // and files included as a standalone units?
_ = @import("compiler_rt/atomics.zig");
- _ = @import("compiler_rt/addXf3.zig");
- _ = @import("compiler_rt/mulXf3.zig");
- _ = @import("compiler_rt/compareXf2.zig");
- _ = @import("compiler_rt/extendXfYf2.zig");
- _ = @import("compiler_rt/extend_f80.zig");
- _ = @import("compiler_rt/truncXfYf2.zig");
- _ = @import("compiler_rt/trunc_f80.zig");
+
+ _ = @import("compiler_rt/addf3.zig");
+ _ = @import("compiler_rt/addsf3.zig");
+ _ = @import("compiler_rt/addtf3.zig");
+ _ = @import("compiler_rt/addxf3.zig");
+ _ = @import("compiler_rt/subdf3.zig");
+ _ = @import("compiler_rt/subsf3.zig");
+ _ = @import("compiler_rt/subtf3.zig");
+ _ = @import("compiler_rt/subxf3.zig");
+
+ _ = @import("compiler_rt/mulf3.zig");
+ _ = @import("compiler_rt/muldf3.zig");
+ _ = @import("compiler_rt/mulsf3.zig");
+ _ = @import("compiler_rt/multf3.zig");
+ _ = @import("compiler_rt/mulxf3.zig");
+
+ _ = @import("compiler_rt/comparef.zig");
+ _ = @import("compiler_rt/cmpsf2.zig");
+ _ = @import("compiler_rt/cmpdf2.zig");
+ _ = @import("compiler_rt/cmptf2.zig");
+ _ = @import("compiler_rt/cmpxf2.zig");
+ _ = @import("compiler_rt/gesf2.zig");
+ _ = @import("compiler_rt/gedf2.zig");
+ _ = @import("compiler_rt/getf2.zig");
+ _ = @import("compiler_rt/gexf2.zig");
+ _ = @import("compiler_rt/unordsf2.zig");
+ _ = @import("compiler_rt/unorddf2.zig");
+ _ = @import("compiler_rt/unordtf2.zig");
+
+ _ = @import("compiler_rt/extendf.zig");
+ _ = @import("compiler_rt/extenddftf2.zig");
+ _ = @import("compiler_rt/extenddfxf2.zig");
+ _ = @import("compiler_rt/extendhfsf2.zig");
+ _ = @import("compiler_rt/extendhftf2.zig");
+ _ = @import("compiler_rt/extendhfxf2.zig");
+ _ = @import("compiler_rt/extendsfdf2.zig");
+ _ = @import("compiler_rt/extendsftf2.zig");
+ _ = @import("compiler_rt/extendsfxf2.zig");
+ _ = @import("compiler_rt/extendxftf2.zig");
+
+ _ = @import("compiler_rt/truncf.zig");
+ _ = @import("compiler_rt/truncsfhf2.zig");
+ _ = @import("compiler_rt/truncdfhf2.zig");
+ _ = @import("compiler_rt/truncdfsf2.zig");
+ _ = @import("compiler_rt/trunctfhf2.zig");
+ _ = @import("compiler_rt/trunctfsf2.zig");
+ _ = @import("compiler_rt/trunctfdf2.zig");
+ _ = @import("compiler_rt/trunctfxf2.zig");
+ _ = @import("compiler_rt/truncxfhf2.zig");
+ _ = @import("compiler_rt/truncxfsf2.zig");
+ _ = @import("compiler_rt/truncxfdf2.zig");
+
_ = @import("compiler_rt/divtf3.zig");
_ = @import("compiler_rt/divsf3.zig");
_ = @import("compiler_rt/divdf3.zig");
@@ -43,35 +84,101 @@ comptime {
_ = @import("compiler_rt/udivti3.zig");
_ = @import("compiler_rt/udivmodti4.zig");
_ = @import("compiler_rt/umodti3.zig");
- _ = @import("compiler_rt/floatXiYf.zig");
- _ = @import("compiler_rt/fixXfYi.zig");
+
+ _ = @import("compiler_rt/int_to_float.zig");
+ _ = @import("compiler_rt/floatsihf.zig");
+ _ = @import("compiler_rt/floatsisf.zig");
+ _ = @import("compiler_rt/floatsidf.zig");
+ _ = @import("compiler_rt/floatsitf.zig");
+ _ = @import("compiler_rt/floatsixf.zig");
+ _ = @import("compiler_rt/floatdihf.zig");
+ _ = @import("compiler_rt/floatdisf.zig");
+ _ = @import("compiler_rt/floatdidf.zig");
+ _ = @import("compiler_rt/floatditf.zig");
+ _ = @import("compiler_rt/floatdixf.zig");
+ _ = @import("compiler_rt/floattihf.zig");
+ _ = @import("compiler_rt/floattisf.zig");
+ _ = @import("compiler_rt/floattidf.zig");
+ _ = @import("compiler_rt/floattitf.zig");
+ _ = @import("compiler_rt/floattixf.zig");
+ _ = @import("compiler_rt/floatundihf.zig");
+ _ = @import("compiler_rt/floatundisf.zig");
+ _ = @import("compiler_rt/floatundidf.zig");
+ _ = @import("compiler_rt/floatunditf.zig");
+ _ = @import("compiler_rt/floatundixf.zig");
+ _ = @import("compiler_rt/floatunsihf.zig");
+ _ = @import("compiler_rt/floatunsisf.zig");
+ _ = @import("compiler_rt/floatunsidf.zig");
+ _ = @import("compiler_rt/floatunsitf.zig");
+ _ = @import("compiler_rt/floatunsixf.zig");
+ _ = @import("compiler_rt/floatuntihf.zig");
+ _ = @import("compiler_rt/floatuntisf.zig");
+ _ = @import("compiler_rt/floatuntidf.zig");
+ _ = @import("compiler_rt/floatuntitf.zig");
+ _ = @import("compiler_rt/floatuntixf.zig");
+
+ _ = @import("compiler_rt/float_to_int.zig");
+ _ = @import("compiler_rt/fixhfsi.zig");
+ _ = @import("compiler_rt/fixhfdi.zig");
+ _ = @import("compiler_rt/fixhfti.zig");
+ _ = @import("compiler_rt/fixsfsi.zig");
+ _ = @import("compiler_rt/fixsfdi.zig");
+ _ = @import("compiler_rt/fixsfti.zig");
+ _ = @import("compiler_rt/fixdfsi.zig");
+ _ = @import("compiler_rt/fixdfdi.zig");
+ _ = @import("compiler_rt/fixdfti.zig");
+ _ = @import("compiler_rt/fixtfsi.zig");
+ _ = @import("compiler_rt/fixtfdi.zig");
+ _ = @import("compiler_rt/fixtfti.zig");
+ _ = @import("compiler_rt/fixxfsi.zig");
+ _ = @import("compiler_rt/fixxfdi.zig");
+ _ = @import("compiler_rt/fixxfti.zig");
+ _ = @import("compiler_rt/fixunshfsi.zig");
+ _ = @import("compiler_rt/fixunshfdi.zig");
+ _ = @import("compiler_rt/fixunshfti.zig");
+ _ = @import("compiler_rt/fixunssfsi.zig");
+ _ = @import("compiler_rt/fixunssfdi.zig");
+ _ = @import("compiler_rt/fixunssfti.zig");
+ _ = @import("compiler_rt/fixunsdfsi.zig");
+ _ = @import("compiler_rt/fixunsdfdi.zig");
+ _ = @import("compiler_rt/fixunsdfti.zig");
+ _ = @import("compiler_rt/fixunstfsi.zig");
+ _ = @import("compiler_rt/fixunstfdi.zig");
+ _ = @import("compiler_rt/fixunstfti.zig");
+ _ = @import("compiler_rt/fixunsxfsi.zig");
+ _ = @import("compiler_rt/fixunsxfdi.zig");
+ _ = @import("compiler_rt/fixunsxfti.zig");
+
_ = @import("compiler_rt/count0bits.zig");
_ = @import("compiler_rt/parity.zig");
_ = @import("compiler_rt/popcount.zig");
_ = @import("compiler_rt/bswap.zig");
_ = @import("compiler_rt/int.zig");
_ = @import("compiler_rt/shift.zig");
+
_ = @import("compiler_rt/negXi2.zig");
+
_ = @import("compiler_rt/muldi3.zig");
+
_ = @import("compiler_rt/absv.zig");
+ _ = @import("compiler_rt/absvsi2.zig");
+ _ = @import("compiler_rt/absvdi2.zig");
+ _ = @import("compiler_rt/absvti2.zig");
+
_ = @import("compiler_rt/negv.zig");
_ = @import("compiler_rt/addo.zig");
_ = @import("compiler_rt/subo.zig");
_ = @import("compiler_rt/mulo.zig");
_ = @import("compiler_rt/cmp.zig");
+
_ = @import("compiler_rt/negXf2.zig");
+
_ = @import("compiler_rt/os_version_check.zig");
_ = @import("compiler_rt/emutls.zig");
_ = @import("compiler_rt/arm.zig");
_ = @import("compiler_rt/aulldiv.zig");
_ = @import("compiler_rt/aullrem.zig");
- _ = @import("compiler_rt/sparc.zig");
_ = @import("compiler_rt/clear_cache.zig");
- // missing: Floating point raised to integer power
-
- // missing: Complex arithmetic
- // (a + ib) * (c + id)
- // (a + ib) / (c + id)
-
+ _ = @import("compiler_rt/sparc.zig");
}
src/compiler_rt.zig
@@ -200,8 +200,8 @@ const sources = &[_][]const u8{
"compiler_rt/umodti3.zig",
"compiler_rt/truncXfYf2.zig",
"compiler_rt/trunc_f80.zig",
- "compiler_rt/addXf3.zig",
- "compiler_rt/mulXf3.zig",
+ "compiler_rt/addf3.zig",
+ "compiler_rt/mulf3.zig",
"compiler_rt/divsf3.zig",
"compiler_rt/divdf3.zig",
"compiler_rt/divxf3.zig",
CMakeLists.txt
@@ -480,7 +480,14 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/sort.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/absv.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addXf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/addxf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subdf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subsf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subtf3.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/subxf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/addo.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/arm.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/atomics.zig"
@@ -491,7 +498,18 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/clear_cache.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmp.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/common.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/compareXf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/comparef.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmptf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/cmpxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gesf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gedf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/getf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/gexf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unorddf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/unordtf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/cos.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/count0bits.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/divdf3.zig"
@@ -502,11 +520,79 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/emutls.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/exp2.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendXfYf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extend_f80.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extenddfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfsf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendhfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfdf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsftf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendsfxf2.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/extendxftf2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fabs.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixXfYi.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatXiYf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/int_to_float.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatsixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatditf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatdixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floattixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunditf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatundixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatunsixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntihf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntisf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntidf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntitf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/floatuntixf.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/float_to_int.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixhfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixsfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixdfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixtfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixxfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunshfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunssfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsdfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunstfti.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfsi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfdi.zig"
+ "${CMAKE_SOURCE_DIR}/lib/compiler_rt/fixunsxfti.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/floor.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fma.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/fmax.zig"
@@ -517,7 +603,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log10.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/log2.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/modti3.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulXf3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/muldi3.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/mulo.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/multi3.zig"
@@ -541,8 +626,6 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/tan.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trig.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/truncXfYf2.zig"
- "${CMAKE_SOURCE_DIR}/lib/compiler_rt/trunc_f80.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmod.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivmodti4.zig"
"${CMAKE_SOURCE_DIR}/lib/compiler_rt/udivti3.zig"