Commit 1bab854868
Changed files (14)
lib/include/zig.h
@@ -1,103 +1,108 @@
#undef linux
+#define __STDC_WANT_IEC_60559_TYPES_EXT__
+#include <float.h>
+#include <limits.h>
+#include <stdint.h>
+
+#if defined(__has_builtin)
+#define zig_has_builtin(builtin) __has_builtin(__builtin_##builtin)
+#else
+#define zig_has_builtin(builtin) 0
+#endif
+
+#if defined(__has_attribute)
+#define zig_has_attribute(attribute) __has_attribute(attribute)
+#else
+#define zig_has_attribute(attribute) 0
+#endif
+
#if __STDC_VERSION__ >= 201112L
-#define zig_noreturn _Noreturn
#define zig_threadlocal thread_local
#elif __GNUC__
-#define zig_noreturn __attribute__ ((noreturn))
#define zig_threadlocal __thread
#elif _MSC_VER
-#define zig_noreturn __declspec(noreturn)
#define zig_threadlocal __declspec(thread)
#else
-#define zig_noreturn
#define zig_threadlocal zig_threadlocal_unavailable
#endif
-#if defined(_MSC_VER)
-#define ZIG_NAKED __declspec(naked)
+#if zig_has_attribute(naked)
+#define zig_naked __attribute__((naked))
+#elif defined(_MSC_VER)
+#define zig_naked __declspec(naked)
#else
-#define ZIG_NAKED __attribute__((naked))
+#define zig_naked zig_naked_unavailable
#endif
-#if __GNUC__
-#define ZIG_COLD __attribute__ ((cold))
+#if zig_has_attribute(cold)
+#define zig_cold __attribute__((cold))
#else
-#define ZIG_COLD
+#define zig_cold
#endif
#if __STDC_VERSION__ >= 199901L
-#define ZIG_RESTRICT restrict
+#define zig_restrict restrict
#elif defined(__GNUC__)
-#define ZIG_RESTRICT __restrict
+#define zig_restrict __restrict
#else
-#define ZIG_RESTRICT
+#define zig_restrict
#endif
#if __STDC_VERSION__ >= 201112L
-#include <stdalign.h>
-#define ZIG_ALIGN(alignment) alignas(alignment)
-#elif defined(__GNUC__)
-#define ZIG_ALIGN(alignment) __attribute__((aligned(alignment)))
-#else
-#define ZIG_ALIGN(alignment) zig_compile_error("the C compiler being used does not support aligning variables")
-#endif
-
-#if __STDC_VERSION__ >= 199901L
-#include <stdbool.h>
+#define zig_align(alignment) _Alignas(alignment)
+#elif zig_has_attribute(aligned)
+#define zig_align(alignment) __attribute__((aligned(alignment)))
+#elif _MSC_VER
+#define zig_align(alignment) __declspec(align(alignment))
#else
-#define bool unsigned char
-#define true 1
-#define false 0
+#error the C compiler being used does not support aligning variables
#endif
-#if defined(__GNUC__)
+#if zig_has_builtin(unreachable)
#define zig_unreachable() __builtin_unreachable()
#else
#define zig_unreachable()
#endif
-#ifdef __cplusplus
-#define ZIG_EXTERN_C extern "C"
+#if defined(__cplusplus)
+#define zig_extern_c extern "C"
#else
-#define ZIG_EXTERN_C
+#define zig_extern_c
#endif
-#if defined(_MSC_VER)
-#define zig_breakpoint() __debugbreak()
-#elif defined(__MINGW32__) || defined(__MINGW64__)
-#define zig_breakpoint() __debugbreak()
-#elif defined(__clang__)
+#if zig_has_builtin(debugtrap)
#define zig_breakpoint() __builtin_debugtrap()
-#elif defined(__GNUC__)
+#elif zig_has_builtin(trap)
#define zig_breakpoint() __builtin_trap()
+#elif defined(_MSC_VER) || defined(__MINGW32__) || defined(__MINGW64__)
+#define zig_breakpoint() __debugbreak()
#elif defined(__i386__) || defined(__x86_64__)
#define zig_breakpoint() __asm__ volatile("int $0x03");
#else
#define zig_breakpoint() raise(SIGTRAP)
#endif
-#if defined(_MSC_VER)
-#define zig_return_address() _ReturnAddress()
-#elif defined(__GNUC__)
+#if zig_has_builtin(return_address)
#define zig_return_address() __builtin_extract_return_addr(__builtin_return_address(0))
+#elif defined(_MSC_VER)
+#define zig_return_address() _ReturnAddress()
#else
#define zig_return_address() 0
#endif
-#if defined(__GNUC__)
+#if zig_has_builtin(frame_address)
#define zig_frame_address() __builtin_frame_address(0)
#else
#define zig_frame_address() 0
#endif
-#if defined(__GNUC__)
+#if zig_has_builtin(prefetch)
#define zig_prefetch(addr, rw, locality) __builtin_prefetch(addr, rw, locality)
#else
#define zig_prefetch(addr, rw, locality)
#endif
-#if defined(__clang__)
+#if zig_has_builtin(memory_size) && zig_has_builtin(memory_grow)
#define zig_wasm_memory_size(index) __builtin_wasm_memory_size(index)
#define zig_wasm_memory_grow(index, delta) __builtin_wasm_memory_grow(index, delta)
#else
@@ -130,8 +135,8 @@
#define memory_order_acq_rel __ATOMIC_ACQ_REL
#define memory_order_seq_cst __ATOMIC_SEQ_CST
#define zig_atomic(type) type
-#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, false, succ, fail)
-#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, true , succ, fail)
+#define zig_cmpxchg_strong(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, zig_false, succ, fail)
+#define zig_cmpxchg_weak(obj, expected, desired, succ, fail) __atomic_compare_exchange_n(obj, &(expected), desired, zig_true , succ, fail)
#define zig_atomicrmw_xchg(obj, arg, order) __atomic_exchange_n(obj, arg, order)
#define zig_atomicrmw_add(obj, arg, order) __atomic_fetch_add (obj, arg, order)
#define zig_atomicrmw_sub(obj, arg, order) __atomic_fetch_sub (obj, arg, order)
@@ -168,1366 +173,1258 @@
#define zig_fence(order) zig_unimplemented()
#endif
-#include <stdint.h>
-#include <stddef.h>
-#include <limits.h>
+#if __STDC_VERSION__ >= 201112L
+#define zig_noreturn _Noreturn void
+#define zig_threadlocal thread_local
+#elif __GNUC__
+#define zig_noreturn __attribute__ ((noreturn)) void
+#define zig_threadlocal __thread
+#elif _MSC_VER
+#define zig_noreturn __declspec(noreturn) void
+#define zig_threadlocal __declspec(thread)
+#else
+#define zig_noreturn void
+#define zig_threadlocal zig_threadlocal_unavailable
+#endif
-#define int128_t __int128
-#define uint128_t unsigned __int128
-#define UINT128_MAX (((uint128_t)UINT64_MAX<<64|UINT64_MAX))
-ZIG_EXTERN_C void *memcpy (void *ZIG_RESTRICT, const void *ZIG_RESTRICT, size_t);
-ZIG_EXTERN_C void *memset (void *, int, size_t);
-ZIG_EXTERN_C int64_t __addodi4(int64_t lhs, int64_t rhs, int *overflow);
-ZIG_EXTERN_C int128_t __addoti4(int128_t lhs, int128_t rhs, int *overflow);
-ZIG_EXTERN_C uint64_t __uaddodi4(uint64_t lhs, uint64_t rhs, int *overflow);
-ZIG_EXTERN_C uint128_t __uaddoti4(uint128_t lhs, uint128_t rhs, int *overflow);
-ZIG_EXTERN_C int32_t __subosi4(int32_t lhs, int32_t rhs, int *overflow);
-ZIG_EXTERN_C int64_t __subodi4(int64_t lhs, int64_t rhs, int *overflow);
-ZIG_EXTERN_C int128_t __suboti4(int128_t lhs, int128_t rhs, int *overflow);
-ZIG_EXTERN_C uint32_t __usubosi4(uint32_t lhs, uint32_t rhs, int *overflow);
-ZIG_EXTERN_C uint64_t __usubodi4(uint64_t lhs, uint64_t rhs, int *overflow);
-ZIG_EXTERN_C uint128_t __usuboti4(uint128_t lhs, uint128_t rhs, int *overflow);
-ZIG_EXTERN_C int64_t __mulodi4(int64_t lhs, int64_t rhs, int *overflow);
-ZIG_EXTERN_C int128_t __muloti4(int128_t lhs, int128_t rhs, int *overflow);
-ZIG_EXTERN_C uint64_t __umulodi4(uint64_t lhs, uint64_t rhs, int *overflow);
-ZIG_EXTERN_C uint128_t __umuloti4(uint128_t lhs, uint128_t rhs, int *overflow);
-
-
-static inline uint8_t zig_addw_u8(uint8_t lhs, uint8_t rhs, uint8_t max) {
- uint8_t thresh = max - rhs;
- if (lhs > thresh) {
- return lhs - thresh - 1;
- } else {
- return lhs + rhs;
- }
+#define zig_bitSizeOf(T) (CHAR_BIT * sizeof(T))
+
+typedef void zig_void;
+
+#if defined(__cplusplus)
+typedef bool zig_bool;
+#define zig_false false
+#define zig_true true
+#else
+#if __STDC_VERSION__ >= 199901L
+typedef _Bool zig_bool;
+#else
+typedef char zig_bool;
+#endif
+#define zig_false ((zig_bool)0)
+#define zig_true ((zig_bool)1)
+#endif
+
+typedef uintptr_t zig_usize;
+typedef intptr_t zig_isize;
+typedef signed short int zig_c_short;
+typedef unsigned short int zig_c_ushort;
+typedef signed int zig_c_int;
+typedef unsigned int zig_c_uint;
+typedef signed long int zig_c_long;
+typedef unsigned long int zig_c_ulong;
+typedef signed long long int zig_c_longlong;
+typedef unsigned long long int zig_c_ulonglong;
+typedef long double zig_c_longdouble;
+
+typedef uint8_t zig_u8;
+typedef int8_t zig_i8;
+typedef uint16_t zig_u16;
+typedef int16_t zig_i16;
+typedef uint32_t zig_u32;
+typedef int32_t zig_i32;
+typedef uint64_t zig_u64;
+typedef int64_t zig_i64;
+
+#define zig_as_u8(val) UINT8_C(val)
+#define zig_as_i8(val) INT8_C(val)
+#define zig_as_u16(val) UINT16_C(val)
+#define zig_as_i16(val) INT16_C(val)
+#define zig_as_u32(val) UINT32_C(val)
+#define zig_as_i32(val) INT32_C(val)
+#define zig_as_u64(val) UINT64_C(val)
+#define zig_as_i64(val) INT64_C(val)
+
+#define zig_minInt_u8 zig_as_u8(0)
+#define zig_maxInt_u8 UINT8_MAX
+#define zig_minInt_i8 INT8_MIN
+#define zig_maxInt_i8 INT8_MAX
+#define zig_minInt_u16 zig_as_u16(0)
+#define zig_maxInt_u16 UINT16_MAX
+#define zig_minInt_i16 INT16_MIN
+#define zig_maxInt_i16 INT16_MAX
+#define zig_minInt_u32 zig_as_u32(0)
+#define zig_maxInt_u32 UINT32_MAX
+#define zig_minInt_i32 INT32_MIN
+#define zig_maxInt_i32 INT32_MAX
+#define zig_minInt_u64 zig_as_u64(0)
+#define zig_maxInt_u64 UINT64_MAX
+#define zig_minInt_i64 INT64_MIN
+#define zig_maxInt_i64 INT64_MAX
+
+#if FLT_MANT_DIG == 11
+typedef float zig_f16;
+#elif DBL_MANT_DIG == 11
+typedef double zig_f16;
+#elif LDBL_MANT_DIG == 11
+typedef long double zig_f16;
+#elif FLT16_MANT_DIG == 11
+typedef _Float16 zig_f16;
+#endif
+
+#if FLT_MANT_DIG == 24
+typedef float zig_f32;
+#elif DBL_MANT_DIG == 24
+typedef double zig_f32;
+#elif LDBL_MANT_DIG == 24
+typedef long double zig_f32;
+#elif FLT32_MANT_DIG == 24
+typedef _Float32 zig_f32;
+#endif
+
+#if FLT_MANT_DIG == 53
+typedef float zig_f64;
+#elif DBL_MANT_DIG == 53
+typedef double zig_f64;
+#elif LDBL_MANT_DIG == 53
+typedef long double zig_f64;
+#elif FLT64_MANT_DIG == 53
+typedef _Float64 zig_f64;
+#endif
+
+#if FLT_MANT_DIG == 64
+typedef float zig_f80;
+#elif DBL_MANT_DIG == 64
+typedef double zig_f80;
+#elif LDBL_MANT_DIG == 64
+typedef long double zig_f80;
+#elif FLT80_MANT_DIG == 64
+typedef _Float80 zig_f80;
+#elif defined(__SIZEOF_FLOAT80__)
+typedef __float80 zig_f80;
+#endif
+
+#if FLT_MANT_DIG == 113
+typedef float zig_f128;
+#elif DBL_MANT_DIG == 113
+typedef double zig_f128;
+#elif LDBL_MANT_DIG == 113
+typedef long double zig_f128;
+#elif FLT128_MANT_DIG == 113
+typedef _Float128 zig_f128;
+#elif defined(__SIZEOF_FLOAT128__)
+typedef __float128 zig_f128;
+#endif
+
+zig_extern_c void *memcpy (void *zig_restrict, void const *zig_restrict, zig_usize);
+zig_extern_c void *memset (void *, int, zig_usize);
+
+/* ==================== 8/16/32/64-bit Integer Routines ===================== */
+
+#define zig_maxInt(Type, bits) zig_shr_##Type(zig_maxInt_##Type, (zig_bitSizeOf(zig_##Type) - bits))
+#define zig_minInt(Type, bits) zig_not_##Type(zig_maxInt(Type, bits), bits)
+
+#define zig_int_helpers(w) \
+ static inline zig_u##w zig_shl_u##w(zig_u##w lhs, zig_u8 rhs) { \
+ return lhs << rhs; \
+ } \
+\
+ static inline zig_i##w zig_shl_i##w(zig_i##w lhs, zig_u8 rhs) { \
+ return lhs << rhs; \
+ } \
+\
+ static inline zig_u##w zig_shr_u##w(zig_u##w lhs, zig_u8 rhs) { \
+ return lhs >> rhs; \
+ } \
+\
+ static inline zig_i##w zig_shr_i##w(zig_i##w lhs, zig_u8 rhs) { \
+ zig_i##w sign_mask = lhs < zig_as_i##w(0) ? zig_as_i##w(-1) : zig_as_i##w(0); \
+ return ((lhs ^ sign_mask) >> rhs) ^ sign_mask; \
+ } \
+\
+ static inline zig_u##w zig_not_u##w(zig_u##w val, zig_u8 bits) { \
+ return val ^ zig_maxInt(u##w, bits); \
+ } \
+\
+ static inline zig_i##w zig_not_i##w(zig_i##w val, zig_u8 bits) { \
+ (void)bits; \
+ return ~val; \
+ } \
+\
+ static inline zig_u##w zig_wrap_u##w(zig_u##w val, zig_u8 bits) { \
+ return val & zig_maxInt(u##w, bits); \
+ } \
+\
+ static inline zig_i##w zig_wrap_i##w(zig_i##w val, zig_u8 bits) { \
+ return (val & zig_as_u##w(1) << (bits - zig_as_u8(1))) != 0 \
+ ? val | zig_minInt(i##w, bits) : val & zig_maxInt(i##w, bits); \
+ } \
+\
+ static inline zig_u##w zig_div_floor_u##w(zig_u##w lhs, zig_u##w rhs) { \
+ return lhs / rhs; \
+ } \
+\
+ static inline zig_i##w zig_div_floor_i##w(zig_i##w lhs, zig_i##w rhs) { \
+ return lhs / rhs - (((lhs ^ rhs) & (lhs % rhs)) < zig_as_i##w(0)); \
+ } \
+\
+ static inline zig_u##w zig_mod_u##w(zig_u##w lhs, zig_u##w rhs) { \
+ return lhs % rhs; \
+ } \
+\
+ static inline zig_i##w zig_mod_i##w(zig_i##w lhs, zig_i##w rhs) { \
+ zig_i##w rem = lhs % rhs; \
+ return rem + (((lhs ^ rhs) & rem) < zig_as_i##w(0) ? rhs : zig_as_i##w(0)); \
+ }
+zig_int_helpers(8)
+zig_int_helpers(16)
+zig_int_helpers(32)
+zig_int_helpers(64)
+
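A minimal usage sketch, not part of this commit: the helpers above take an explicit `bits` argument so a fixed-width C type can model any Zig integer width. Assuming the header is reachable on the include path as "zig.h", a 3-bit wrap looks like this:

    #include <stdio.h>
    #include "zig.h"

    int main(void) {
        /* A Zig u3 stored in a zig_u8: 9 wraps to 9 mod 8 == 1. */
        zig_u8 u = zig_wrap_u8(zig_as_u8(9), 3);
        /* A Zig i3 stored in a zig_i8: 5 wraps into [-4, 3], giving -3. */
        zig_i8 i = zig_wrap_i8(zig_as_i8(5), 3);
        printf("%u %d\n", (unsigned)u, (int)i);
        return 0;
    }
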
+static inline zig_bool zig_addo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_u32 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u32(full_res, bits);
+ return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+#else
+ *res = zig_addw_u32(lhs, rhs, bits);
+ return *res < lhs;
+#endif
}
-static inline int8_t zig_addw_i8(int8_t lhs, int8_t rhs, int8_t min, int8_t max) {
- if ((lhs > 0) && (rhs > 0)) {
- int8_t thresh = max - rhs;
- if (lhs > thresh) {
- return min + lhs - thresh - 1;
- }
- } else if ((lhs < 0) && (rhs < 0)) {
- int8_t thresh = min - rhs;
- if (lhs < thresh) {
- return max + lhs - thresh + 1;
- }
- }
- return lhs + rhs;
+zig_extern_c zig_i32 __addosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
+static inline zig_bool zig_addo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_i32 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_u32 full_res = __addosi4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i32(full_res, bits);
+ return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
}
-static inline uint16_t zig_addw_u16(uint16_t lhs, uint16_t rhs, uint16_t max) {
- uint16_t thresh = max - rhs;
- if (lhs > thresh) {
- return lhs - thresh - 1;
- } else {
- return lhs + rhs;
- }
+static inline zig_bool zig_addo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_u64 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u64(full_res, bits);
+ return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+#else
+ *res = zig_addw_u64(lhs, rhs, bits);
+ return *res < lhs;
+#endif
}
-static inline int16_t zig_addw_i16(int16_t lhs, int16_t rhs, int16_t min, int16_t max) {
- if ((lhs > 0) && (rhs > 0)) {
- int16_t thresh = max - rhs;
- if (lhs > thresh) {
- return min + lhs - thresh - 1;
- }
- } else if ((lhs < 0) && (rhs < 0)) {
- int16_t thresh = min - rhs;
- if (lhs < thresh) {
- return max + lhs - thresh + 1;
- }
- }
- return lhs + rhs;
+zig_extern_c zig_i64 __addodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
+static inline zig_bool zig_addo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_i64 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_u64 full_res = __addodi4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i64(full_res, bits);
+ return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
}
-static inline uint32_t zig_addw_u32(uint32_t lhs, uint32_t rhs, uint32_t max) {
- uint32_t thresh = max - rhs;
- if (lhs > thresh) {
- return lhs - thresh - 1;
- } else {
- return lhs + rhs;
- }
+static inline zig_bool zig_addo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_u8 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u8(full_res, bits);
+ return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+#else
+ return zig_addo_u32(res, lhs, rhs, bits);
+#endif
}
-static inline int32_t zig_addw_i32(int32_t lhs, int32_t rhs, int32_t min, int32_t max) {
- if ((lhs > 0) && (rhs > 0)) {
- int32_t thresh = max - rhs;
- if (lhs > thresh) {
- return min + lhs - thresh - 1;
- }
- } else if ((lhs < 0) && (rhs < 0)) {
- int32_t thresh = min - rhs;
- if (lhs < thresh) {
- return max + lhs - thresh + 1;
- }
- }
- return lhs + rhs;
+static inline zig_bool zig_addo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_i8 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_i8(full_res, bits);
+ return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+#else
+ return zig_addo_i32(res, lhs, rhs, bits);
+#endif
}
-static inline uint64_t zig_addw_u64(uint64_t lhs, uint64_t rhs, uint64_t max) {
- uint64_t thresh = max - rhs;
- if (lhs > thresh) {
- return lhs - thresh - 1;
- } else {
- return lhs + rhs;
- }
+static inline zig_bool zig_addo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_u16 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u16(full_res, bits);
+ return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+#else
+ return zig_addo_u32(res, lhs, rhs, bits);
+#endif
}
-static inline int64_t zig_addw_i64(int64_t lhs, int64_t rhs, int64_t min, int64_t max) {
- if ((lhs > 0) && (rhs > 0)) {
- int64_t thresh = max - rhs;
- if (lhs > thresh) {
- return min + lhs - thresh - 1;
- }
- } else if ((lhs < 0) && (rhs < 0)) {
- int64_t thresh = min - rhs;
- if (lhs < thresh) {
- return max + lhs - thresh + 1;
- }
- }
- return lhs + rhs;
+static inline zig_bool zig_addo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_i16 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_i16(full_res, bits);
+ return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+#else
+ return zig_addo_i32(res, lhs, rhs, bits);
+#endif
}
-static inline intptr_t zig_addw_isize(intptr_t lhs, intptr_t rhs, intptr_t min, intptr_t max) {
- return (intptr_t)(((uintptr_t)lhs) + ((uintptr_t)rhs));
+static inline zig_bool zig_subo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_u32 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u32(full_res, bits);
+ return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+#else
+ *res = zig_subw_u32(lhs, rhs, bits);
+ return *res > lhs;
+#endif
}
-static inline short zig_addw_short(short lhs, short rhs, short min, short max) {
- return (short)(((unsigned short)lhs) + ((unsigned short)rhs));
+zig_extern_c zig_i32 __subosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
+static inline zig_bool zig_subo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_i32 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_u32 full_res = __subosi4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i32(full_res, bits);
+ return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
}
-static inline int zig_addw_int(int lhs, int rhs, int min, int max) {
- return (int)(((unsigned)lhs) + ((unsigned)rhs));
+static inline zig_bool zig_subo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_u64 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u64(full_res, bits);
+ return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+#else
+ *res = zig_subw_u64(lhs, rhs, bits);
+ return *res > lhs;
+#endif
}
-static inline long zig_addw_long(long lhs, long rhs, long min, long max) {
- return (long)(((unsigned long)lhs) + ((unsigned long)rhs));
+zig_extern_c zig_i64 __subodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
+static inline zig_bool zig_subo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_i64 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_u64 full_res = __subodi4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i64(full_res, bits);
+ return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
}
-static inline long long zig_addw_longlong(long long lhs, long long rhs, long long min, long long max) {
- return (long long)(((unsigned long long)lhs) + ((unsigned long long)rhs));
+static inline zig_bool zig_subo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_u8 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u8(full_res, bits);
+ return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+#else
+ return zig_subo_u32(res, lhs, rhs, bits);
+#endif
}
-static inline uint8_t zig_subw_u8(uint8_t lhs, uint8_t rhs, uint8_t max) {
- if (lhs < rhs) {
- return max - rhs - lhs + 1;
- } else {
- return lhs - rhs;
- }
+static inline zig_bool zig_subo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_i8 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_i8(full_res, bits);
+ return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+#else
+ return zig_subo_i32(res, lhs, rhs, bits);
+#endif
}
-static inline int8_t zig_subw_i8(int8_t lhs, int8_t rhs, int8_t min, int8_t max) {
- if ((lhs > 0) && (rhs < 0)) {
- int8_t thresh = lhs - max;
- if (rhs < thresh) {
- return min + (thresh - rhs - 1);
- }
- } else if ((lhs < 0) && (rhs > 0)) {
- int8_t thresh = lhs - min;
- if (rhs > thresh) {
- return max - (rhs - thresh - 1);
- }
- }
- return lhs - rhs;
+static inline zig_bool zig_subo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_u16 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u16(full_res, bits);
+ return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+#else
+ return zig_subo_u32(res, lhs, rhs, bits);
+#endif
}
-static inline uint16_t zig_subw_u16(uint16_t lhs, uint16_t rhs, uint16_t max) {
- if (lhs < rhs) {
- return max - rhs - lhs + 1;
- } else {
- return lhs - rhs;
- }
+static inline zig_bool zig_subo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_i16 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_i16(full_res, bits);
+ return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+#else
+ return zig_subo_i32(res, lhs, rhs, bits);
+#endif
}
-static inline int16_t zig_subw_i16(int16_t lhs, int16_t rhs, int16_t min, int16_t max) {
- if ((lhs > 0) && (rhs < 0)) {
- int16_t thresh = lhs - max;
- if (rhs < thresh) {
- return min + (thresh - rhs - 1);
- }
- } else if ((lhs < 0) && (rhs > 0)) {
- int16_t thresh = lhs - min;
- if (rhs > thresh) {
- return max - (rhs - thresh - 1);
- }
- }
- return lhs - rhs;
+static inline zig_bool zig_mulo_u32(zig_u32 *res, zig_u32 lhs, zig_u32 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_u32 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u32(full_res, bits);
+ return overflow || full_res < zig_minInt(u32, bits) || full_res > zig_maxInt(u32, bits);
+#else
+ *res = zig_mulw_u32(lhs, rhs, bits);
+ return rhs != zig_as_u32(0) && lhs > zig_maxInt(u32, bits) / rhs;
+#endif
}
-static inline uint32_t zig_subw_u32(uint32_t lhs, uint32_t rhs, uint32_t max) {
- if (lhs < rhs) {
- return max - rhs - lhs + 1;
- } else {
- return lhs - rhs;
- }
+zig_extern_c zig_i32 __mulosi4(zig_i32 lhs, zig_i32 rhs, zig_c_int *overflow);
+static inline zig_bool zig_mulo_i32(zig_i32 *res, zig_i32 lhs, zig_i32 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_i32 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_u32 full_res = __mulosi4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i32(full_res, bits);
+ return overflow || full_res < zig_minInt(i32, bits) || full_res > zig_maxInt(i32, bits);
}
-static inline int32_t zig_subw_i32(int32_t lhs, int32_t rhs, int32_t min, int32_t max) {
- if ((lhs > 0) && (rhs < 0)) {
- int32_t thresh = lhs - max;
- if (rhs < thresh) {
- return min + (thresh - rhs - 1);
- }
- } else if ((lhs < 0) && (rhs > 0)) {
- int32_t thresh = lhs - min;
- if (rhs > thresh) {
- return max - (rhs - thresh - 1);
- }
- }
- return lhs - rhs;
+static inline zig_bool zig_mulo_u64(zig_u64 *res, zig_u64 lhs, zig_u64 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_u64 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u64(full_res, bits);
+ return overflow || full_res < zig_minInt(u64, bits) || full_res > zig_maxInt(u64, bits);
+#else
+ *res = zig_mulw_u64(lhs, rhs, bits);
+ return rhs != zig_as_u64(0) && lhs > zig_maxInt(u64, bits) / rhs;
+#endif
}
-static inline uint64_t zig_subw_u64(uint64_t lhs, uint64_t rhs, uint64_t max) {
- if (lhs < rhs) {
- return max - rhs - lhs + 1;
- } else {
- return lhs - rhs;
- }
+zig_extern_c zig_i64 __mulodi4(zig_i64 lhs, zig_i64 rhs, zig_c_int *overflow);
+static inline zig_bool zig_mulo_i64(zig_i64 *res, zig_i64 lhs, zig_i64 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_i64 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_u64 full_res = __mulodi4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i64(full_res, bits);
+ return overflow || full_res < zig_minInt(i64, bits) || full_res > zig_maxInt(i64, bits);
}
-static inline int64_t zig_subw_i64(int64_t lhs, int64_t rhs, int64_t min, int64_t max) {
- if ((lhs > 0) && (rhs < 0)) {
- int64_t thresh = lhs - max;
- if (rhs < thresh) {
- return min + (thresh - rhs - 1);
- }
- } else if ((lhs < 0) && (rhs > 0)) {
- int64_t thresh = lhs - min;
- if (rhs > thresh) {
- return max - (rhs - thresh - 1);
- }
- }
- return lhs - rhs;
+static inline zig_bool zig_mulo_u8(zig_u8 *res, zig_u8 lhs, zig_u8 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_u8 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u8(full_res, bits);
+ return overflow || full_res < zig_minInt(u8, bits) || full_res > zig_maxInt(u8, bits);
+#else
+ return zig_mulo_u32(res, lhs, rhs, bits);
+#endif
}
-static inline intptr_t zig_subw_isize(intptr_t lhs, intptr_t rhs, intptr_t min, intptr_t max) {
- return (intptr_t)(((uintptr_t)lhs) - ((uintptr_t)rhs));
+static inline zig_bool zig_mulo_i8(zig_i8 *res, zig_i8 lhs, zig_i8 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_i8 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_i8(full_res, bits);
+ return overflow || full_res < zig_minInt(i8, bits) || full_res > zig_maxInt(i8, bits);
+#else
+ return zig_mulo_i32(res, lhs, rhs, bits);
+#endif
}
-static inline short zig_subw_short(short lhs, short rhs, short min, short max) {
- return (short)(((unsigned short)lhs) - ((unsigned short)rhs));
+static inline zig_bool zig_mulo_u16(zig_u16 *res, zig_u16 lhs, zig_u16 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_u16 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u16(full_res, bits);
+ return overflow || full_res < zig_minInt(u16, bits) || full_res > zig_maxInt(u16, bits);
+#else
+ return zig_mulo_u32(res, lhs, rhs, bits);
+#endif
}
-static inline int zig_subw_int(int lhs, int rhs, int min, int max) {
- return (int)(((unsigned)lhs) - ((unsigned)rhs));
+static inline zig_bool zig_mulo_i16(zig_i16 *res, zig_i16 lhs, zig_i16 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_i16 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_i16(full_res, bits);
+ return overflow || full_res < zig_minInt(i16, bits) || full_res > zig_maxInt(i16, bits);
+#else
+ return zig_mulo_i32(res, lhs, rhs, bits);
+#endif
}
-static inline long zig_subw_long(long lhs, long rhs, long min, long max) {
- return (long)(((unsigned long)lhs) - ((unsigned long)rhs));
+#define zig_int_builtins(w) \
+ static inline zig_u##w zig_shlw_u##w(zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ return zig_wrap_u##w(zig_shl_u##w(lhs, rhs), bits); \
+ } \
+\
+ static inline zig_i##w zig_shlw_i##w(zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ return zig_wrap_i##w((zig_i##w)zig_shl_u##w((zig_u##w)lhs, (zig_u##w)rhs), bits); \
+ } \
+\
+ static inline zig_u##w zig_addw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ return zig_wrap_u##w(lhs + rhs, bits); \
+ } \
+\
+ static inline zig_i##w zig_addw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs + (zig_u##w)rhs), bits); \
+ } \
+\
+ static inline zig_u##w zig_subw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ return zig_wrap_u##w(lhs - rhs, bits); \
+ } \
+\
+ static inline zig_i##w zig_subw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs - (zig_u##w)rhs), bits); \
+ } \
+\
+ static inline zig_u##w zig_mulw_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ return zig_wrap_u##w(lhs * rhs, bits); \
+ } \
+\
+ static inline zig_i##w zig_mulw_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ return zig_wrap_i##w((zig_i##w)((zig_u##w)lhs * (zig_u##w)rhs), bits); \
+ } \
+\
+ static inline zig_bool zig_shlo_u##w(zig_u##w *res, zig_u##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ *res = zig_shlw_u##w(lhs, rhs, bits); \
+ return (lhs & zig_maxInt_u##w << (bits - rhs)) != zig_as_u##w(0); \
+ } \
+\
+ static inline zig_bool zig_shlo_i##w(zig_i##w *res, zig_i##w lhs, zig_u8 rhs, zig_u8 bits) { \
+ *res = zig_shlw_i##w(lhs, rhs, bits); \
+ zig_i##w mask = (zig_i##w)(zig_maxInt_u##w << (bits - rhs - 1)); \
+ return (lhs & mask) != zig_as_i##w(0) && (lhs & mask) != mask; \
+ } \
+\
+ static inline zig_u##w zig_shls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ zig_u##w res; \
+ if (rhs >= bits) return lhs != zig_as_u##w(0) ? zig_maxInt(u##w, bits) : lhs; \
+ return zig_shlo_u##w(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ } \
+\
+ static inline zig_i##w zig_shls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ zig_i##w res; \
+ if ((zig_u##w)rhs < (zig_u##w)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
+ return lhs < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ } \
+\
+ static inline zig_u##w zig_adds_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ zig_u##w res; \
+ return zig_addo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ } \
+\
+ static inline zig_i##w zig_adds_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ zig_i##w res; \
+ if (!zig_addo_i##w(&res, lhs, rhs, bits)) return res; \
+ return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ } \
+\
+ static inline zig_u##w zig_subs_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ zig_u##w res; \
+ return zig_subo_u##w(&res, lhs, rhs, bits) ? zig_minInt(u##w, bits) : res; \
+ } \
+\
+ static inline zig_i##w zig_subs_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ zig_i##w res; \
+ if (!zig_subo_i##w(&res, lhs, rhs, bits)) return res; \
+ return res >= zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ } \
+\
+ static inline zig_u##w zig_muls_u##w(zig_u##w lhs, zig_u##w rhs, zig_u8 bits) { \
+ zig_u##w res; \
+ return zig_mulo_u##w(&res, lhs, rhs, bits) ? zig_maxInt(u##w, bits) : res; \
+ } \
+\
+ static inline zig_i##w zig_muls_i##w(zig_i##w lhs, zig_i##w rhs, zig_u8 bits) { \
+ zig_i##w res; \
+ if (!zig_mulo_i##w(&res, lhs, rhs, bits)) return res; \
+ return (lhs ^ rhs) < zig_as_i##w(0) ? zig_minInt(i##w, bits) : zig_maxInt(i##w, bits); \
+ }
+zig_int_builtins(8)
+zig_int_builtins(16)
+zig_int_builtins(32)
+zig_int_builtins(64)
+
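A brief illustrative sketch, not part of this commit: for a narrow width such as i4, the `*_o` helpers report overflow of the truncated result while the `*_s` helpers saturate instead (again assuming the header is included as "zig.h"):

    #include <stdio.h>
    #include "zig.h"

    int main(void) {
        zig_i8 sum;
        /* 7 + 1 overflows a 4-bit signed integer; the wrapped result is -8. */
        zig_bool ov = zig_addo_i8(&sum, zig_as_i8(7), zig_as_i8(1), 4);
        /* The saturating variant clamps to the i4 maximum, 7. */
        zig_i8 sat = zig_adds_i8(zig_as_i8(7), zig_as_i8(1), 4);
        printf("%d %d %d\n", (int)ov, (int)sum, (int)sat);
        return 0;
    }
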
+#define zig_builtin8(name, val) __builtin_##name(val)
+typedef zig_c_uint zig_Builtin8;
+
+#define zig_builtin16(name, val) __builtin_##name(val)
+typedef zig_c_uint zig_Builtin16;
+
+#if INT_MIN <= INT32_MIN
+#define zig_builtin32(name, val) __builtin_##name(val)
+typedef zig_c_uint zig_Builtin32;
+#elif LONG_MIN <= INT32_MIN
+#define zig_builtin32(name, val) __builtin_##name##l(val)
+typedef zig_c_ulong zig_Builtin32;
+#endif
+
+#if INT_MIN <= INT64_MIN
+#define zig_builtin64(name, val) __builtin_##name(val)
+typedef zig_c_uint zig_Builtin64;
+#elif LONG_MIN <= INT64_MIN
+#define zig_builtin64(name, val) __builtin_##name##l(val)
+typedef zig_c_ulong zig_Builtin64;
+#elif LLONG_MIN <= INT64_MIN
+#define zig_builtin64(name, val) __builtin_##name##ll(val)
+typedef zig_c_ulonglong zig_Builtin64;
+#endif
+
+#if zig_has_builtin(clz)
+#define zig_builtin_clz(w) \
+ static inline zig_u8 zig_clz_u##w(zig_u##w val, zig_u8 bits) { \
+ if (val == 0) return bits; \
+ return zig_builtin##w(clz, val) - (zig_bitSizeOf(zig_Builtin##w) - bits); \
+ } \
+\
+ static inline zig_u8 zig_clz_i##w(zig_i##w val, zig_u8 bits) { \
+ return zig_clz_u##w((zig_u##w)val, bits); \
+ }
+zig_builtin_clz(8)
+zig_builtin_clz(16)
+zig_builtin_clz(32)
+zig_builtin_clz(64)
+#endif
+
+#if zig_has_builtin(ctz)
+#define zig_builtin_ctz(w) \
+ static inline zig_u8 zig_ctz_u##w(zig_u##w val, zig_u8 bits) { \
+ if (val == 0) return bits; \
+ return zig_builtin##w(ctz, val); \
+ } \
+\
+ static inline zig_u8 zig_ctz_i##w(zig_i##w val, zig_u8 bits) { \
+ return zig_ctz_u##w((zig_u##w)val, bits); \
+ }
+zig_builtin_ctz(8)
+zig_builtin_ctz(16)
+zig_builtin_ctz(32)
+zig_builtin_ctz(64)
+#endif
+
+#if zig_has_builtin(popcount)
+#define zig_builtin_popcount(w) \
+ static inline zig_u8 zig_popcount_u##w(zig_u##w val, zig_u8 bits) { \
+ (void)bits; \
+ return zig_builtin##w(popcount, val); \
+ } \
+\
+ static inline zig_u8 zig_popcount_i##w(zig_i##w val, zig_u8 bits) { \
+ \
+ return zig_popcount_u##w((zig_u##w)val, bits); \
+ }
+zig_builtin_popcount(8)
+zig_builtin_popcount(16)
+zig_builtin_popcount(32)
+zig_builtin_popcount(64)
+#endif
+
+static inline zig_u8 zig_byte_swap_u8(zig_u8 val, zig_u8 bits) {
+ return zig_wrap_u8(val >> (8 - bits), bits);
}
-static inline long long zig_subw_longlong(long long lhs, long long rhs, long long min, long long max) {
- return (long long)(((unsigned long long)lhs) - ((unsigned long long)rhs));
+static inline zig_i8 zig_byte_swap_i8(zig_i8 val, zig_u8 bits) {
+ return zig_wrap_i8((zig_i8)zig_byte_swap_u8((zig_u8)val, bits), bits);
}
-static inline bool zig_addo_i8(int8_t lhs, int8_t rhs, int8_t *res, int8_t min, int8_t max) {
-#if defined(__GNUC__) && INT8_MAX == INT_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_sadd_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT8_MAX == LONG_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_saddl_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT8_MAX == LLONG_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_saddll_overflow(lhs, rhs, (long long*)res);
- }
+static inline zig_u16 zig_byte_swap_u16(zig_u16 val, zig_u8 bits) {
+ zig_u16 full_res;
+#if zig_has_builtin(bswap16)
+ full_res = __builtin_bswap16(val);
+#else
+    full_res = (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 0), 8) << 8 |
+               (zig_u16)zig_byte_swap_u8((zig_u8)(val >> 8), 8) >> 0;
#endif
- int16_t big_result = (int16_t)lhs + (int16_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int16_t)max - (int16_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int16_t)max - (int16_t)min);
- return true;
- }
- *res = big_result;
- return false;
+ return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline bool zig_addo_i16(int16_t lhs, int16_t rhs, int16_t *res, int16_t min, int16_t max) {
-#if defined(__GNUC__) && INT16_MAX == INT_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_sadd_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT16_MAX == LONG_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_saddl_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT16_MAX == LLONG_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_saddll_overflow(lhs, rhs, (long long*)res);
- }
-#endif
- int32_t big_result = (int32_t)lhs + (int32_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int32_t)max - (int32_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int32_t)max - (int32_t)min);
- return true;
- }
- *res = big_result;
- return false;
+static inline zig_i16 zig_byte_swap_i16(zig_i16 val, zig_u8 bits) {
+ return zig_wrap_i16((zig_i16)zig_byte_swap_u16((zig_u16)val, bits), bits);
}
-static inline bool zig_addo_i32(int32_t lhs, int32_t rhs, int32_t *res, int32_t min, int32_t max) {
-#if defined(__GNUC__) && INT32_MAX == INT_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_sadd_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT32_MAX == LONG_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_saddl_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT32_MAX == LLONG_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_saddll_overflow(lhs, rhs, (long long*)res);
- }
-#endif
- int64_t big_result = (int64_t)lhs + (int64_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int64_t)max - (int64_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int64_t)max - (int64_t)min);
- return true;
- }
- *res = big_result;
- return false;
-}
-
-static inline bool zig_addo_i64(int64_t lhs, int64_t rhs, int64_t *res, int64_t min, int64_t max) {
- bool overflow;
-#if defined(__GNUC__) && INT64_MAX == INT_MAX
- overflow = __builtin_sadd_overflow(lhs, rhs, (int*)res);
-#elif defined(__GNUC__) && INT64_MAX == LONG_MAX
- overflow = __builtin_saddl_overflow(lhs, rhs, (long*)res);
-#elif defined(__GNUC__) && INT64_MAX == LLONG_MAX
- overflow = __builtin_saddll_overflow(lhs, rhs, (long long*)res);
+static inline zig_u32 zig_byte_swap_u32(zig_u32 val, zig_u8 bits) {
+ zig_u32 full_res;
+#if zig_has_builtin(bswap32)
+ full_res = __builtin_bswap32(val);
#else
- int int_overflow;
- *res = __addodi4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (!overflow) {
- if (*res > max) {
- // TODO adjust the result to be the truncated bits
- return true;
- } else if (*res < min) {
- // TODO adjust the result to be the truncated bits
- return true;
- }
- }
- return overflow;
+    full_res = (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 0), 16) << 16 |
+               (zig_u32)zig_byte_swap_u16((zig_u16)(val >> 16), 16) >> 0;
+#endif
+ return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline bool zig_addo_i128(int128_t lhs, int128_t rhs, int128_t *res, int128_t min, int128_t max) {
- bool overflow;
-#if defined(__GNUC__) && INT128_MAX == INT_MAX
- overflow = __builtin_sadd_overflow(lhs, rhs, (int*)res);
-#elif defined(__GNUC__) && INT128_MAX == LONG_MAX
- overflow = __builtin_saddl_overflow(lhs, rhs, (long*)res);
-#elif defined(__GNUC__) && INT128_MAX == LLONG_MAX
- overflow = __builtin_saddll_overflow(lhs, rhs, (long long*)res);
-#else
- int int_overflow;
- *res = __addoti4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (!overflow) {
- if (*res > max) {
- // TODO adjust the result to be the truncated bits
- return true;
- } else if (*res < min) {
- // TODO adjust the result to be the truncated bits
- return true;
- }
- }
- return overflow;
+static inline zig_i32 zig_byte_swap_i32(zig_i32 val, zig_u8 bits) {
+ return zig_wrap_i32((zig_i32)zig_byte_swap_u32((zig_u32)val, bits), bits);
}
-static inline bool zig_addo_u8(uint8_t lhs, uint8_t rhs, uint8_t *res, uint8_t max) {
-#if defined(__GNUC__) && UINT8_MAX == UINT_MAX
- if (max == UINT8_MAX) {
- return __builtin_uadd_overflow(lhs, rhs, (unsigned int*)res);
- }
-#elif defined(__GNUC__) && UINT8_MAX == ULONG_MAX
- if (max == UINT8_MAX) {
- return __builtin_uaddl_overflow(lhs, rhs, (unsigned long*)res);
- }
-#elif defined(__GNUC__) && UINT8_MAX == ULLONG_MAX
- if (max == UINT8_MAX) {
- return __builtin_uaddll_overflow(lhs, rhs, (unsigned long long*)res);
- }
+static inline zig_u64 zig_byte_swap_u64(zig_u64 val, zig_u8 bits) {
+ zig_u64 full_res;
+#if zig_has_builtin(bswap64)
+ full_res = __builtin_bswap64(val);
+#else
+    full_res = (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 0), 32) << 32 |
+               (zig_u64)zig_byte_swap_u32((zig_u32)(val >> 32), 32) >> 0;
#endif
- uint16_t big_result = (uint16_t)lhs + (uint16_t)rhs;
- if (big_result > max) {
- *res = big_result - max - 1;
- return true;
- }
- *res = big_result;
- return false;
+ return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline uint16_t zig_addo_u16(uint16_t lhs, uint16_t rhs, uint16_t *res, uint16_t max) {
-#if defined(__GNUC__) && UINT16_MAX == UINT_MAX
- if (max == UINT16_MAX) {
- return __builtin_uadd_overflow(lhs, rhs, (unsigned int*)res);
- }
-#elif defined(__GNUC__) && UINT16_MAX == ULONG_MAX
- if (max == UINT16_MAX) {
- return __builtin_uaddl_overflow(lhs, rhs, (unsigned long*)res);
- }
-#elif defined(__GNUC__) && UINT16_MAX == ULLONG_MAX
- if (max == UINT16_MAX) {
- return __builtin_uaddll_overflow(lhs, rhs, (unsigned long long*)res);
- }
-#endif
- uint32_t big_result = (uint32_t)lhs + (uint32_t)rhs;
- if (big_result > max) {
- *res = big_result - max - 1;
- return true;
- }
- *res = big_result;
- return false;
+static inline zig_i64 zig_byte_swap_i64(zig_i64 val, zig_u8 bits) {
+ return zig_wrap_i64((zig_i64)zig_byte_swap_u64((zig_u64)val, bits), bits);
}
-static inline uint32_t zig_addo_u32(uint32_t lhs, uint32_t rhs, uint32_t *res, uint32_t max) {
-#if defined(__GNUC__) && UINT32_MAX == UINT_MAX
- if (max == UINT32_MAX) {
- return __builtin_uadd_overflow(lhs, rhs, (unsigned int*)res);
- }
-#elif defined(__GNUC__) && UINT32_MAX == ULONG_MAX
- if (max == UINT32_MAX) {
- return __builtin_uaddl_overflow(lhs, rhs, (unsigned long*)res);
- }
-#elif defined(__GNUC__) && UINT32_MAX == ULLONG_MAX
- if (max == UINT32_MAX) {
- return __builtin_uaddll_overflow(lhs, rhs, (unsigned long long*)res);
- }
+static inline zig_u8 zig_bit_reverse_u8(zig_u8 val, zig_u8 bits) {
+ zig_u8 full_res;
+#if zig_has_builtin(bitreverse8)
+ full_res = __builtin_bitreverse8(val);
+#else
+ static zig_u8 const lut[0x10] = {
+ 0b0000, 0b1000, 0b0100, 0b1100,
+ 0b0010, 0b1010, 0b0110, 0b1110,
+ 0b0001, 0b1001, 0b0101, 0b1101,
+ 0b0011, 0b1011, 0b0111, 0b1111,
+ };
+ full_res = lut[val >> 0 & 0xF] << 4 | lut[val >> 4 & 0xF] << 0;
#endif
- uint64_t big_result = (uint64_t)lhs + (uint64_t)rhs;
- if (big_result > max) {
- *res = big_result - max - 1;
- return true;
- }
- *res = big_result;
- return false;
-}
-
-static inline uint64_t zig_addo_u64(uint64_t lhs, uint64_t rhs, uint64_t *res, uint64_t max) {
- bool overflow;
-#if defined(__GNUC__) && UINT64_MAX == UINT_MAX
- overflow = __builtin_uadd_overflow(lhs, rhs, (unsigned int*)res);
-#elif defined(__GNUC__) && UINT64_MAX == ULONG_MAX
- overflow = __builtin_uaddl_overflow(lhs, rhs, (unsigned long*)res);
-#elif defined(__GNUC__) && UINT64_MAX == ULLONG_MAX
- overflow = __builtin_uaddll_overflow(lhs, rhs, (unsigned long long*)res);
+ return zig_wrap_u8(full_res >> (8 - bits), bits);
+}
+
+static inline zig_i8 zig_bit_reverse_i8(zig_i8 val, zig_u8 bits) {
+ return zig_wrap_i8((zig_i8)zig_bit_reverse_u8((zig_u8)val, bits), bits);
+}
+
+static inline zig_u16 zig_bit_reverse_u16(zig_u16 val, zig_u8 bits) {
+ zig_u16 full_res;
+#if zig_has_builtin(bitreverse16)
+ full_res = __builtin_bitreverse16(val);
#else
- int int_overflow;
- *res = __uaddodi4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
+    full_res = (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 0), 8) << 8 |
+               (zig_u16)zig_bit_reverse_u8((zig_u8)(val >> 8), 8) >> 0;
#endif
- if (*res > max && !overflow) {
- *res -= max - 1;
- return true;
- }
- return overflow;
+ return zig_wrap_u16(full_res >> (16 - bits), bits);
}
-static inline uint128_t zig_addo_u128(uint128_t lhs, uint128_t rhs, uint128_t *res, uint128_t max) {
- int overflow;
- *res = __uaddoti4(lhs, rhs, &overflow);
- if (*res > max && overflow == 0) {
- *res -= max - 1;
- return true;
- }
- return overflow != 0;
+static inline zig_i16 zig_bit_reverse_i16(zig_i16 val, zig_u8 bits) {
+ return zig_wrap_i16((zig_i16)zig_bit_reverse_u16((zig_u16)val, bits), bits);
}
-static inline bool zig_subo_i8(int8_t lhs, int8_t rhs, int8_t *res, int8_t min, int8_t max) {
-#if defined(__GNUC__) && INT8_MAX == INT_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_ssub_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT8_MAX == LONG_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_ssubl_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT8_MAX == LLONG_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_ssubll_overflow(lhs, rhs, (long long*)res);
- }
+static inline zig_u32 zig_bit_reverse_u32(zig_u32 val, zig_u8 bits) {
+ zig_u32 full_res;
+#if zig_has_builtin(bitreverse32)
+ full_res = __builtin_bitreverse32(val);
+#else
+    full_res = (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 0), 16) << 16 |
+               (zig_u32)zig_bit_reverse_u16((zig_u16)(val >> 16), 16) >> 0;
#endif
- int16_t big_result = (int16_t)lhs - (int16_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int16_t)max - (int16_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int16_t)max - (int16_t)min);
- return true;
- }
- *res = big_result;
- return false;
+ return zig_wrap_u32(full_res >> (32 - bits), bits);
}
-static inline bool zig_subo_i16(int16_t lhs, int16_t rhs, int16_t *res, int16_t min, int16_t max) {
-#if defined(__GNUC__) && INT16_MAX == INT_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_ssub_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT16_MAX == LONG_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_ssubl_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT16_MAX == LLONG_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_ssubll_overflow(lhs, rhs, (long long*)res);
- }
+static inline zig_i32 zig_bit_reverse_i32(zig_i32 val, zig_u8 bits) {
+ return zig_wrap_i32((zig_i32)zig_bit_reverse_u32((zig_u32)val, bits), bits);
+}
+
+static inline zig_u64 zig_bit_reverse_u64(zig_u64 val, zig_u8 bits) {
+ zig_u64 full_res;
+#if zig_has_builtin(bitreverse64)
+ full_res = __builtin_bitreverse64(val);
+#else
+    full_res = (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 0), 32) << 32 |
+               (zig_u64)zig_bit_reverse_u32((zig_u32)(val >> 32), 32) >> 0;
#endif
- int32_t big_result = (int32_t)lhs - (int32_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int32_t)max - (int32_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int32_t)max - (int32_t)min);
- return true;
- }
- *res = big_result;
- return false;
+ return zig_wrap_u64(full_res >> (64 - bits), bits);
}
-static inline bool zig_subo_i32(int32_t lhs, int32_t rhs, int32_t *res, int32_t min, int32_t max) {
-#if defined(__GNUC__) && INT32_MAX == INT_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_ssub_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT32_MAX == LONG_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_ssubl_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT32_MAX == LLONG_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_ssubll_overflow(lhs, rhs, (long long*)res);
- }
+static inline zig_i64 zig_bit_reverse_i64(zig_i64 val, zig_u8 bits) {
+ return zig_wrap_i64((zig_i64)zig_bit_reverse_u64((zig_u64)val, bits), bits);
+}
+
+/* ======================== 128-bit Integer Routines ======================== */
+
+#if !defined(zig_has_int128)
+# if defined(__SIZEOF_INT128__)
+# define zig_has_int128 1
+# else
+# define zig_has_int128 0
+# endif
#endif
- int64_t big_result = (int64_t)lhs - (int64_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int64_t)max - (int64_t)min);
- return true;
+
+#if zig_has_int128
+
+typedef unsigned __int128 zig_u128;
+typedef signed __int128 zig_i128;
+
+#define zig_as_u128(hi, lo) ((zig_u128)(hi)<<64|(lo))
+#define zig_as_i128(hi, lo) ((zig_i128)zig_as_u128(hi, lo))
+#define zig_hi_u128(val) ((zig_u64)((val) >> 64))
+#define zig_lo_u128(val) ((zig_u64)((val) >> 0))
+#define zig_hi_i128(val) ((zig_i64)((val) >> 64))
+#define zig_lo_i128(val) ((zig_u64)((val) >> 0))
+#define zig_bitcast_u128(val) ((zig_u128)(val))
+#define zig_bitcast_i128(val) ((zig_i128)(val))
+#define zig_cmp_int128(ZigType, CType) \
+ static inline zig_i8 zig_cmp_##ZigType(CType lhs, CType rhs) { \
+ return (lhs > rhs) - (lhs < rhs); \
}
- if (big_result < min) {
- *res = big_result + ((int64_t)max - (int64_t)min);
- return true;
+#define zig_bit_int128(ZigType, CType, operation, operator) \
+ static inline CType zig_##operation##_##ZigType(CType lhs, CType rhs) { \
+ return lhs operator rhs; \
}
- *res = big_result;
- return false;
-}
-
-static inline bool zig_subo_i64(int64_t lhs, int64_t rhs, int64_t *res, int64_t min, int64_t max) {
- bool overflow;
-#if defined(__GNUC__) && INT64_MAX == INT_MAX
- overflow = __builtin_ssub_overflow(lhs, rhs, (int*)res);
-#elif defined(__GNUC__) && INT64_MAX == LONG_MAX
- overflow = __builtin_ssubl_overflow(lhs, rhs, (long*)res);
-#elif defined(__GNUC__) && INT64_MAX == LLONG_MAX
- overflow = __builtin_ssubll_overflow(lhs, rhs, (long long*)res);
+
+#else /* zig_has_int128 */
+
+#if __LITTLE_ENDIAN__ || _MSC_VER
+typedef struct { zig_align(16) zig_u64 lo; zig_u64 hi; } zig_u128;
+typedef struct { zig_align(16) zig_u64 lo; zig_i64 hi; } zig_i128;
#else
- int int_overflow;
- *res = __subodi4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (!overflow) {
- if (*res > max) {
- // TODO adjust the result to be the truncated bits
- return true;
- } else if (*res < min) {
- // TODO adjust the result to be the truncated bits
- return true;
- }
+typedef struct { zig_align(16) zig_u64 hi; zig_u64 lo; } zig_u128;
+typedef struct { zig_align(16) zig_i64 hi; zig_u64 lo; } zig_i128;
+#endif
+
+#define zig_as_u128(hi, lo) ((zig_u128){ .h##i = (hi), .l##o = (lo) })
+#define zig_as_i128(hi, lo) ((zig_i128){ .h##i = (hi), .l##o = (lo) })
+#define zig_hi_u128(val) ((val).hi)
+#define zig_lo_u128(val) ((val).lo)
+#define zig_hi_i128(val) ((val).hi)
+#define zig_lo_i128(val) ((val).lo)
+#define zig_bitcast_u128(val) zig_as_u128((zig_u64)(val).hi, (val).lo)
+#define zig_bitcast_i128(val) zig_as_i128((zig_i64)(val).hi, (val).lo)
+#define zig_cmp_int128(ZigType, CType) \
+ static inline zig_c_int zig_cmp_##ZigType(CType lhs, CType rhs) { \
+ return (lhs.hi == rhs.hi) \
+ ? (lhs.lo > rhs.lo) - (lhs.lo < rhs.lo) \
+ : (lhs.hi > rhs.hi) - (lhs.hi < rhs.hi); \
+ }
+#define zig_bit_int128(ZigType, CType, operation, operator) \
+ static inline CType zig_##operation##_##ZigType(CType lhs, CType rhs) { \
+ return (CType){ .hi = lhs.hi operator rhs.hi, .lo = lhs.lo operator rhs.lo }; \
}
- return overflow;
+
+#endif /* zig_has_int128 */
+
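A short sketch, not part of this commit: the zig_as_u128/zig_hi_u128/zig_lo_u128 macros present the same interface whether zig_u128 is a native __int128 or the two-word struct fallback, so generated code never needs to know which branch above was taken (assuming the header is included as "zig.h"):

    #include <stdio.h>
    #include "zig.h"

    int main(void) {
        zig_u128 x = zig_as_u128(zig_as_u64(1), zig_as_u64(2)); /* hi = 1, lo = 2 */
        printf("hi=%llu lo=%llu\n",
               (unsigned long long)zig_hi_u128(x),
               (unsigned long long)zig_lo_u128(x));
        return 0;
    }
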
+#define zig_minInt_u128 zig_as_u128(zig_minInt_u64, zig_minInt_u64)
+#define zig_maxInt_u128 zig_as_u128(zig_maxInt_u64, zig_maxInt_u64)
+#define zig_minInt_i128 zig_as_i128(zig_minInt_i64, zig_minInt_u64)
+#define zig_maxInt_i128 zig_as_i128(zig_maxInt_i64, zig_maxInt_u64)
+
+zig_cmp_int128(u128, zig_u128)
+zig_cmp_int128(i128, zig_i128)
+
+zig_bit_int128(u128, zig_u128, and, &)
+zig_bit_int128(i128, zig_i128, and, &)
+
+zig_bit_int128(u128, zig_u128, or, |)
+zig_bit_int128(i128, zig_i128, or, |)
+
+zig_bit_int128(u128, zig_u128, xor, ^)
+zig_bit_int128(i128, zig_i128, xor, ^)
+
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs);
+
+#if zig_has_int128
+
+static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
+ return val ^ zig_maxInt(u128, bits);
}
-static inline bool zig_subo_i128(int128_t lhs, int128_t rhs, int128_t *res, int128_t min, int128_t max) {
- bool overflow;
-#if defined(__GNUC__) && INT128_MAX == INT_MAX
- overflow = __builtin_ssub_overflow(lhs, rhs, (int*)res);
-#elif defined(__GNUC__) && INT128_MAX == LONG_MAX
- overflow = __builtin_ssubl_overflow(lhs, rhs, (long*)res);
-#elif defined(__GNUC__) && INT128_MAX == LLONG_MAX
- overflow = __builtin_ssubll_overflow(lhs, rhs, (long long*)res);
-#else
- int int_overflow;
- *res = __suboti4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (!overflow) {
- if (*res > max) {
- // TODO adjust the result to be the truncated bits
- return true;
- } else if (*res < min) {
- // TODO adjust the result to be the truncated bits
- return true;
- }
- }
- return overflow;
+static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
+ (void)bits;
+ return ~val;
}
-static inline bool zig_subo_u8(uint8_t lhs, uint8_t rhs, uint8_t *res, uint8_t max) {
-#if defined(__GNUC__) && UINT8_MAX == UINT_MAX
- return __builtin_usub_overflow(lhs, rhs, (unsigned int*)res);
-#elif defined(__GNUC__) && UINT8_MAX == ULONG_MAX
- return __builtin_usubl_overflow(lhs, rhs, (unsigned long*)res);
-#elif defined(__GNUC__) && UINT8_MAX == ULLONG_MAX
- return __builtin_usubll_overflow(lhs, rhs, (unsigned long long*)res);
-#endif
- if (rhs > lhs) {
- *res = max - (rhs - lhs - 1);
- return true;
- }
- *res = lhs - rhs;
- return false;
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
+ return lhs >> rhs;
}
-static inline uint16_t zig_subo_u16(uint16_t lhs, uint16_t rhs, uint16_t *res, uint16_t max) {
-#if defined(__GNUC__) && UINT16_MAX == UINT_MAX
- return __builtin_usub_overflow(lhs, rhs, (unsigned int*)res);
-#elif defined(__GNUC__) && UINT16_MAX == ULONG_MAX
- return __builtin_usubl_overflow(lhs, rhs, (unsigned long*)res);
-#elif defined(__GNUC__) && UINT16_MAX == ULLONG_MAX
- return __builtin_usubll_overflow(lhs, rhs, (unsigned long long*)res);
-#endif
- if (rhs > lhs) {
- *res = max - (rhs - lhs - 1);
- return true;
- }
- *res = lhs - rhs;
- return false;
-}
-
-static inline uint32_t zig_subo_u32(uint32_t lhs, uint32_t rhs, uint32_t *res, uint32_t max) {
- if (max == UINT32_MAX) {
-#if defined(__GNUC__) && UINT32_MAX == UINT_MAX
- return __builtin_usub_overflow(lhs, rhs, (unsigned int*)res);
-#elif defined(__GNUC__) && UINT32_MAX == ULONG_MAX
- return __builtin_usubl_overflow(lhs, rhs, (unsigned long*)res);
-#elif defined(__GNUC__) && UINT32_MAX == ULLONG_MAX
- return __builtin_usubll_overflow(lhs, rhs, (unsigned long long*)res);
-#endif
- int int_overflow;
- *res = __usubosi4(lhs, rhs, &int_overflow);
- return int_overflow != 0;
- } else {
- if (rhs > lhs) {
- *res = max - (rhs - lhs - 1);
- return true;
- }
- *res = lhs - rhs;
- return false;
- }
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
+ return lhs << rhs;
}
-static inline uint64_t zig_subo_u64(uint64_t lhs, uint64_t rhs, uint64_t *res, uint64_t max) {
- if (max == UINT64_MAX) {
-#if defined(__GNUC__) && UINT64_MAX == UINT_MAX
- return __builtin_usub_overflow(lhs, rhs, (unsigned int*)res);
-#elif defined(__GNUC__) && UINT64_MAX == ULONG_MAX
- return __builtin_usubl_overflow(lhs, rhs, (unsigned long*)res);
-#elif defined(__GNUC__) && UINT64_MAX == ULLONG_MAX
- return __builtin_usubll_overflow(lhs, rhs, (unsigned long long*)res);
-#else
- int int_overflow;
- *res = __usubodi4(lhs, rhs, &int_overflow);
- return int_overflow != 0;
-#endif
- } else {
- if (rhs > lhs) {
- *res = max - (rhs - lhs - 1);
- return true;
- }
- *res = lhs - rhs;
- return false;
- }
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
+ return lhs << rhs;
}
-static inline uint128_t zig_subo_u128(uint128_t lhs, uint128_t rhs, uint128_t *res, uint128_t max) {
- if (max == UINT128_MAX) {
- int int_overflow;
- *res = __usuboti4(lhs, rhs, &int_overflow);
- return int_overflow != 0;
- } else {
- if (rhs > lhs) {
- *res = max - (rhs - lhs - 1);
- return true;
- }
- *res = lhs - rhs;
- return false;
- }
+static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
+ return lhs + rhs;
}
-static inline bool zig_mulo_i8(int8_t lhs, int8_t rhs, int8_t *res, int8_t min, int8_t max) {
-#if defined(__GNUC__) && INT8_MAX == INT_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_smul_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT8_MAX == LONG_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_smull_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT8_MAX == LLONG_MAX
- if (min == INT8_MIN && max == INT8_MAX) {
- return __builtin_smulll_overflow(lhs, rhs, (long long*)res);
- }
-#endif
- int16_t big_result = (int16_t)lhs * (int16_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int16_t)max - (int16_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int16_t)max - (int16_t)min);
- return true;
- }
- *res = big_result;
- return false;
+static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) {
+ return lhs + rhs;
}
-static inline bool zig_mulo_i16(int16_t lhs, int16_t rhs, int16_t *res, int16_t min, int16_t max) {
-#if defined(__GNUC__) && INT16_MAX == INT_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_smul_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT16_MAX == LONG_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_smull_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT16_MAX == LLONG_MAX
- if (min == INT16_MIN && max == INT16_MAX) {
- return __builtin_smulll_overflow(lhs, rhs, (long long*)res);
- }
-#endif
- int32_t big_result = (int32_t)lhs * (int32_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int32_t)max - (int32_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int32_t)max - (int32_t)min);
- return true;
- }
- *res = big_result;
- return false;
+static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) {
+ return lhs - rhs;
}
-static inline bool zig_mulo_i32(int32_t lhs, int32_t rhs, int32_t *res, int32_t min, int32_t max) {
-#if defined(__GNUC__) && INT32_MAX == INT_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_smul_overflow(lhs, rhs, (int*)res);
- }
-#elif defined(__GNUC__) && INT32_MAX == LONG_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_smull_overflow(lhs, rhs, (long*)res);
- }
-#elif defined(__GNUC__) && INT32_MAX == LLONG_MAX
- if (min == INT32_MIN && max == INT32_MAX) {
- return __builtin_smulll_overflow(lhs, rhs, (long long*)res);
- }
-#endif
- int64_t big_result = (int64_t)lhs * (int64_t)rhs;
- if (big_result > max) {
- *res = big_result - ((int64_t)max - (int64_t)min);
- return true;
- }
- if (big_result < min) {
- *res = big_result + ((int64_t)max - (int64_t)min);
- return true;
- }
- *res = big_result;
- return false;
-}
-
-static inline bool zig_mulo_i64(int64_t lhs, int64_t rhs, int64_t *res, int64_t min, int64_t max) {
- bool overflow;
-#if defined(__GNUC__) && INT64_MAX == INT_MAX
- overflow = __builtin_smul_overflow(lhs, rhs, (int*)res);
-#elif defined(__GNUC__) && INT64_MAX == LONG_MAX
- overflow = __builtin_smull_overflow(lhs, rhs, (long*)res);
-#elif defined(__GNUC__) && INT64_MAX == LLONG_MAX
- overflow = __builtin_smulll_overflow(lhs, rhs, (long long*)res);
-#else
- int int_overflow;
- *res = __mulodi4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (!overflow) {
- if (*res > max) {
- // TODO adjust the result to be the truncated bits
- return true;
- } else if (*res < min) {
- // TODO adjust the result to be the truncated bits
- return true;
- }
- }
- return overflow;
+static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
+ return lhs - rhs;
}
-static inline bool zig_mulo_i128(int128_t lhs, int128_t rhs, int128_t *res, int128_t min, int128_t max) {
- bool overflow;
-#if defined(__GNUC__) && INT128_MAX == INT_MAX
- overflow = __builtin_smul_overflow(lhs, rhs, (int*)res);
-#elif defined(__GNUC__) && INT128_MAX == LONG_MAX
- overflow = __builtin_smull_overflow(lhs, rhs, (long*)res);
-#elif defined(__GNUC__) && INT128_MAX == LLONG_MAX
- overflow = __builtin_smulll_overflow(lhs, rhs, (long long*)res);
-#else
- int int_overflow;
- *res = __muloti4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (!overflow) {
- if (*res > max) {
- // TODO adjust the result to be the truncated bits
- return true;
- } else if (*res < min) {
- // TODO adjust the result to be the truncated bits
- return true;
- }
- }
- return overflow;
+static inline zig_u128 zig_mul_u128(zig_u128 lhs, zig_u128 rhs) {
+ return lhs * rhs;
}
-static inline bool zig_mulo_u8(uint8_t lhs, uint8_t rhs, uint8_t *res, uint8_t max) {
-#if defined(__GNUC__) && UINT8_MAX == UINT_MAX
- if (max == UINT8_MAX) {
- return __builtin_umul_overflow(lhs, rhs, (unsigned int*)res);
- }
-#elif defined(__GNUC__) && UINT8_MAX == ULONG_MAX
- if (max == UINT8_MAX) {
- return __builtin_umull_overflow(lhs, rhs, (unsigned long*)res);
- }
-#elif defined(__GNUC__) && UINT8_MAX == ULLONG_MAX
- if (max == UINT8_MAX) {
- return __builtin_umulll_overflow(lhs, rhs, (unsigned long long*)res);
- }
-#endif
- uint16_t big_result = (uint16_t)lhs * (uint16_t)rhs;
- if (big_result > max) {
- *res = big_result - max - 1;
- return true;
- }
- *res = big_result;
- return false;
+static inline zig_i128 zig_mul_i128(zig_i128 lhs, zig_i128 rhs) {
+ return lhs * rhs;
}
-static inline uint16_t zig_mulo_u16(uint16_t lhs, uint16_t rhs, uint16_t *res, uint16_t max) {
-#if defined(__GNUC__) && UINT16_MAX == UINT_MAX
- if (max == UINT16_MAX) {
- return __builtin_umul_overflow(lhs, rhs, (unsigned int*)res);
- }
-#elif defined(__GNUC__) && UINT16_MAX == ULONG_MAX
- if (max == UINT16_MAX) {
- return __builtin_umull_overflow(lhs, rhs, (unsigned long*)res);
- }
-#elif defined(__GNUC__) && UINT16_MAX == ULLONG_MAX
- if (max == UINT16_MAX) {
- return __builtin_umulll_overflow(lhs, rhs, (unsigned long long*)res);
- }
-#endif
- uint32_t big_result = (uint32_t)lhs * (uint32_t)rhs;
- if (big_result > max) {
- *res = big_result - max - 1;
- return true;
- }
- *res = big_result;
- return false;
+static inline zig_u128 zig_div_trunc_u128(zig_u128 lhs, zig_u128 rhs) {
+ return lhs / rhs;
}
-static inline uint32_t zig_mulo_u32(uint32_t lhs, uint32_t rhs, uint32_t *res, uint32_t max) {
-#if defined(__GNUC__) && UINT32_MAX == UINT_MAX
- if (max == UINT32_MAX) {
- return __builtin_umul_overflow(lhs, rhs, (unsigned int*)res);
- }
-#elif defined(__GNUC__) && UINT32_MAX == ULONG_MAX
- if (max == UINT32_MAX) {
- return __builtin_umull_overflow(lhs, rhs, (unsigned long*)res);
- }
-#elif defined(__GNUC__) && UINT32_MAX == ULLONG_MAX
- if (max == UINT32_MAX) {
- return __builtin_umulll_overflow(lhs, rhs, (unsigned long long*)res);
- }
-#endif
- uint64_t big_result = (uint64_t)lhs * (uint64_t)rhs;
- if (big_result > max) {
- *res = big_result - max - 1;
- return true;
- }
- *res = big_result;
- return false;
-}
-
-static inline uint64_t zig_mulo_u64(uint64_t lhs, uint64_t rhs, uint64_t *res, uint64_t max) {
- bool overflow;
-#if defined(__GNUC__) && UINT64_MAX == UINT_MAX
- overflow = __builtin_umul_overflow(lhs, rhs, (unsigned int*)res);
-#elif defined(__GNUC__) && UINT64_MAX == ULONG_MAX
- overflow = __builtin_umull_overflow(lhs, rhs, (unsigned long*)res);
-#elif defined(__GNUC__) && UINT64_MAX == ULLONG_MAX
- overflow = __builtin_umulll_overflow(lhs, rhs, (unsigned long long*)res);
-#else
- int int_overflow;
- *res = __umulodi4(lhs, rhs, &int_overflow);
- overflow = int_overflow != 0;
-#endif
- if (*res > max && !overflow) {
- *res -= max - 1;
- return true;
- }
- return overflow;
+static inline zig_i128 zig_div_trunc_i128(zig_i128 lhs, zig_i128 rhs) {
+ return lhs / rhs;
}
-static inline uint128_t zig_mulo_u128(uint128_t lhs, uint128_t rhs, uint128_t *res, uint128_t max) {
- int overflow;
- *res = __umuloti4(lhs, rhs, &overflow);
- if (*res > max && overflow == 0) {
- *res -= max - 1;
- return true;
- }
- return overflow != 0;
+static inline zig_u128 zig_rem_u128(zig_u128 lhs, zig_u128 rhs) {
+ return lhs % rhs;
}
-static inline float zig_bitcast_f32_u32(uint32_t arg) {
- float dest;
- memcpy(&dest, &arg, sizeof dest);
- return dest;
+static inline zig_i128 zig_rem_i128(zig_i128 lhs, zig_i128 rhs) {
+ return lhs % rhs;
}
-static inline float zig_bitcast_f64_u64(uint64_t arg) {
- double dest;
- memcpy(&dest, &arg, sizeof dest);
- return dest;
+static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
+ return zig_div_trunc_i128(lhs, rhs) - (zig_rem_i128(lhs, rhs) != zig_as_i128(0, 0) && (lhs ^ rhs) < zig_as_i128(0, 0));
}
-#define zig_add_sat_u(ZT, T) static inline T zig_adds_##ZT(T x, T y, T max) { \
- return (x > max - y) ? max : x + y; \
-}
-
-#define zig_add_sat_s(ZT, T, T2) static inline T zig_adds_##ZT(T2 x, T2 y, T2 min, T2 max) { \
- T2 res = x + y; \
- return (res < min) ? min : (res > max) ? max : res; \
-}
-
-zig_add_sat_u( u8, uint8_t)
-zig_add_sat_s( i8, int8_t, int16_t)
-zig_add_sat_u(u16, uint16_t)
-zig_add_sat_s(i16, int16_t, int32_t)
-zig_add_sat_u(u32, uint32_t)
-zig_add_sat_s(i32, int32_t, int64_t)
-zig_add_sat_u(u64, uint64_t)
-zig_add_sat_s(i64, int64_t, int128_t)
-zig_add_sat_s(isize, intptr_t, int128_t)
-zig_add_sat_s(short, short, int)
-zig_add_sat_s(int, int, long)
-zig_add_sat_s(long, long, long long)
-
-#define zig_sub_sat_u(ZT, T) static inline T zig_subs_##ZT(T x, T y, T max) { \
- return (x > max + y) ? max : x - y; \
+static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
+ zig_i128 rem = zig_rem_i128(lhs, rhs);
+ return rem + (rem != zig_as_i128(0, 0) && (lhs ^ rhs) < zig_as_i128(0, 0) ? rhs : zig_as_i128(0, 0));
}
-#define zig_sub_sat_s(ZT, T, T2) static inline T zig_subs_##ZT(T2 x, T2 y, T2 min, T2 max) { \
- T2 res = x - y; \
- return (res < min) ? min : (res > max) ? max : res; \
-}
-
-zig_sub_sat_u( u8, uint8_t)
-zig_sub_sat_s( i8, int8_t, int16_t)
-zig_sub_sat_u(u16, uint16_t)
-zig_sub_sat_s(i16, int16_t, int32_t)
-zig_sub_sat_u(u32, uint32_t)
-zig_sub_sat_s(i32, int32_t, int64_t)
-zig_sub_sat_u(u64, uint64_t)
-zig_sub_sat_s(i64, int64_t, int128_t)
-zig_sub_sat_s(isize, intptr_t, int128_t)
-zig_sub_sat_s(short, short, int)
-zig_sub_sat_s(int, int, long)
-zig_sub_sat_s(long, long, long long)
-
+#else /* zig_has_int128 */
-#define zig_mul_sat_u(ZT, T, T2) static inline T zig_muls_##ZT(T2 x, T2 y, T2 max) { \
- T2 res = x * y; \
- return (res > max) ? max : res; \
+static inline zig_u128 zig_not_u128(zig_u128 val, zig_u8 bits) {
+ return (zig_u128){ .hi = zig_not_u64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
}
-#define zig_mul_sat_s(ZT, T, T2) static inline T zig_muls_##ZT(T2 x, T2 y, T2 min, T2 max) { \
- T2 res = x * y; \
- return (res < min) ? min : (res > max) ? max : res; \
+static inline zig_i128 zig_not_i128(zig_i128 val, zig_u8 bits) {
+ return (zig_i128){ .hi = zig_not_i64(val.hi, bits - zig_as_u8(64)), .lo = zig_not_u64(val.lo, zig_as_u8(64)) };
}
-zig_mul_sat_u(u8, uint8_t, uint16_t)
-zig_mul_sat_s(i8, int8_t, int16_t)
-zig_mul_sat_u(u16, uint16_t, uint32_t)
-zig_mul_sat_s(i16, int16_t, int32_t)
-zig_mul_sat_u(u32, uint32_t, uint64_t)
-zig_mul_sat_s(i32, int32_t, int64_t)
-zig_mul_sat_u(u64, uint64_t, uint128_t)
-zig_mul_sat_s(i64, int64_t, int128_t)
-zig_mul_sat_s(isize, intptr_t, int128_t)
-zig_mul_sat_s(short, short, int)
-zig_mul_sat_s(int, int, long)
-zig_mul_sat_s(long, long, long long)
+static inline zig_u128 zig_shr_u128(zig_u128 lhs, zig_u8 rhs) {
+ if (rhs == zig_as_u8(0)) return lhs;
+ if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = zig_minInt_u64, .lo = lhs.hi >> (rhs - zig_as_u8(64)) };
+ return (zig_u128){ .hi = lhs.hi >> rhs, .lo = lhs.hi << (zig_as_u8(64) - rhs) | lhs.lo >> rhs };
+}
-#define zig_shl_sat_u(ZT, T, bits) static inline T zig_shls_##ZT(T x, T y, T max) { \
- if(x == 0) return 0; \
- T bits_set = 64 - __builtin_clzll(x); \
- return (bits_set + y > bits) ? max : x << y; \
+static inline zig_u128 zig_shl_u128(zig_u128 lhs, zig_u8 rhs) {
+ if (rhs == zig_as_u8(0)) return lhs;
+ if (rhs >= zig_as_u8(64)) return (zig_u128){ .hi = lhs.lo << (rhs - zig_as_u8(64)), .lo = zig_minInt_u64 };
+ return (zig_u128){ .hi = lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs), .lo = lhs.lo << rhs };
}
-#define zig_shl_sat_s(ZT, T, bits) static inline T zig_shls_##ZT(T x, T y, T min, T max) { \
- if(x == 0) return 0; \
- T x_twos_comp = x < 0 ? -x : x; \
- T bits_set = 64 - __builtin_clzll(x_twos_comp); \
- T min_or_max = (x < 0) ? min : max; \
- return (y + bits_set > bits ) ? min_or_max : x << y; \
+static inline zig_i128 zig_shl_i128(zig_i128 lhs, zig_u8 rhs) {
+ if (rhs == zig_as_u8(0)) return lhs;
+ if (rhs >= zig_as_u8(64)) return (zig_i128){ .hi = (zig_i64)(lhs.lo << (rhs - zig_as_u8(64))), .lo = zig_minInt_u64 };
+ return (zig_i128){ .hi = (zig_i64)((zig_u64)lhs.hi << rhs | lhs.lo >> (zig_as_u8(64) - rhs)), .lo = lhs.lo << rhs };
}
-zig_shl_sat_u(u8, uint8_t, 8)
-zig_shl_sat_s(i8, int8_t, 7)
-zig_shl_sat_u(u16, uint16_t, 16)
-zig_shl_sat_s(i16, int16_t, 15)
-zig_shl_sat_u(u32, uint32_t, 32)
-zig_shl_sat_s(i32, int32_t, 31)
-zig_shl_sat_u(u64, uint64_t, 64)
-zig_shl_sat_s(i64, int64_t, 63)
-zig_shl_sat_s(isize, intptr_t, ((sizeof(intptr_t)) * CHAR_BIT - 1))
-zig_shl_sat_s(short, short, ((sizeof(short )) * CHAR_BIT - 1))
-zig_shl_sat_s(int, int, ((sizeof(int )) * CHAR_BIT - 1))
-zig_shl_sat_s(long, long, ((sizeof(long )) * CHAR_BIT - 1))
+static inline zig_u128 zig_add_u128(zig_u128 lhs, zig_u128 rhs) {
+ zig_u128 res;
+ res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ return res;
+}
-#define zig_bitsizeof(T) (CHAR_BIT * sizeof(T))
-#define zig_bit_mask(T, bit_width) \
- ((bit_width) == zig_bitsizeof(T) \
- ? ((T)-1) \
- : (((T)1 << (T)(bit_width)) - 1))
+static inline zig_i128 zig_add_i128(zig_i128 lhs, zig_i128 rhs) {
+ zig_i128 res;
+ res.hi = lhs.hi + rhs.hi + zig_addo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ return res;
+}
-static inline int zig_clz(unsigned int value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- return __builtin_clz(value) - zig_bitsizeof(unsigned int) + zig_type_bit_width;
+static inline zig_u128 zig_sub_u128(zig_u128 lhs, zig_u128 rhs) {
+ zig_u128 res;
+ res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ return res;
}
-static inline int zig_clzl(unsigned long value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- return __builtin_clzl(value) - zig_bitsizeof(unsigned long) + zig_type_bit_width;
+static inline zig_i128 zig_sub_i128(zig_i128 lhs, zig_i128 rhs) {
+ zig_i128 res;
+ res.hi = lhs.hi - rhs.hi - zig_subo_u64(&res.lo, lhs.lo, rhs.lo, zig_maxInt_u64);
+ return res;
}
-static inline int zig_clzll(unsigned long long value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- return __builtin_clzll(value) - zig_bitsizeof(unsigned long long) + zig_type_bit_width;
+static inline zig_i128 zig_div_floor_i128(zig_i128 lhs, zig_i128 rhs) {
+ zig_i128 rem = zig_rem_i128(lhs, rhs);
+ return zig_sub_i128(zig_div_trunc_i128(lhs, rhs), ((rem.hi != 0 || rem.lo != 0) && (lhs.hi ^ rhs.hi) < zig_as_i64(0)) ? zig_as_i128(0, 1) : zig_as_i128(0, 0));
}
-#define zig_clz_u8 zig_clz
-#define zig_clz_i8 zig_clz
-#define zig_clz_u16 zig_clz
-#define zig_clz_i16 zig_clz
-#define zig_clz_u32 zig_clzl
-#define zig_clz_i32 zig_clzl
-#define zig_clz_u64 zig_clzll
-#define zig_clz_i64 zig_clzll
+static inline zig_i128 zig_mod_i128(zig_i128 lhs, zig_i128 rhs) {
+ zig_i128 rem = zig_rem_i128(lhs, rhs);
+ return zig_add_i128(rem, ((rem.hi != 0 || rem.lo != 0) && (lhs.hi ^ rhs.hi) < zig_as_i64(0)) ? rhs : zig_as_i128(0, 0));
+}
+
+#endif /* zig_has_int128 */
+
+#define zig_div_floor_u128 zig_div_trunc_u128
+#define zig_mod_u128 zig_rem_u128
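
A quick standalone sanity check of the floored-division adjustment used by zig_div_floor_i128 and zig_mod_i128 above, written against plain int64_t rather than the zig_i128 helpers so it compiles on its own: the truncated quotient is corrected exactly when the remainder is nonzero and the operands have opposite signs.

/* Standalone sketch of the div_floor/mod adjustment on int64_t. */
#include <assert.h>
#include <stdint.h>

static int64_t div_floor(int64_t lhs, int64_t rhs) {
    /* subtract 1 from the truncated quotient when rounding toward zero
       rounded the "wrong way" (nonzero remainder, opposite signs) */
    return lhs / rhs - (lhs % rhs != 0 && (lhs ^ rhs) < 0);
}

static int64_t mod_floor(int64_t lhs, int64_t rhs) {
    int64_t rem = lhs % rhs;
    return rem + (rem != 0 && (lhs ^ rhs) < 0 ? rhs : 0);
}

int main(void) {
    assert(div_floor(-7, 2) == -4 && mod_floor(-7, 2) == 1);
    assert(div_floor(7, -2) == -4 && mod_floor(7, -2) == -1);
    assert(div_floor(-6, 2) == -3 && mod_floor(-6, 2) == 0);
    return 0;
}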
-static inline int zig_clz_u128(uint128_t value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- const uint128_t mask = zig_bit_mask(uint128_t, zig_type_bit_width);
- const uint64_t hi = (value & mask) >> 64;
- const uint64_t lo = (value & mask);
- const int leading_zeroes = (
- hi != 0 ? __builtin_clzll(hi) : 64 + (lo != 0 ? __builtin_clzll(lo) : 64));
- return leading_zeroes - zig_bitsizeof(uint128_t) + zig_type_bit_width;
+static inline zig_i128 zig_shr_i128(zig_i128 lhs, zig_u8 rhs) {
+ zig_i128 sign_mask = zig_cmp_i128(lhs, zig_as_i128(0, 0)) < 0 ? zig_as_i128(-1, UINT64_MAX) : zig_as_i128(0, 0);
+ return zig_xor_i128(zig_bitcast_i128(zig_shr_u128(zig_bitcast_u128(zig_xor_i128(lhs, sign_mask)), rhs)), sign_mask);
}
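
zig_shr_i128 builds an arithmetic right shift out of the logical zig_shr_u128 by XOR-ing with a sign mask before and after the shift. A minimal standalone check of that identity at 64-bit width (plain C, none of the zig_* helpers):

/* The sign-mask identity behind zig_shr_i128, checked on 64-bit values:
   asr(x, n) == ((x ^ m) >> n) ^ m with a logical shift, where m is
   all-ones iff x is negative. */
#include <assert.h>
#include <stdint.h>

static int64_t asr_via_mask(int64_t x, unsigned n) {
    uint64_t m = x < 0 ? UINT64_MAX : 0; /* sign mask */
    return (int64_t)((((uint64_t)x ^ m) >> n) ^ m);
}

int main(void) {
    assert(asr_via_mask(-16, 2) == -4);
    assert(asr_via_mask(-1, 63) == -1);
    assert(asr_via_mask(16, 2) == 4);
    return 0;
}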
-#define zig_clz_i128 zig_clz_u128
+static inline zig_u128 zig_wrap_u128(zig_u128 val, zig_u8 bits) {
+ return zig_and_u128(val, zig_maxInt(u128, bits));
+}
-static inline int zig_ctz(unsigned int value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- return __builtin_ctz(value & zig_bit_mask(unsigned int, zig_type_bit_width));
+static inline zig_i128 zig_wrap_i128(zig_i128 val, zig_u8 bits) {
+ return zig_as_i128(zig_wrap_i64(zig_hi_i128(val), bits - zig_as_u8(64)), zig_lo_i128(val));
}
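
zig_wrap_u128/zig_wrap_i128 reduce a full-width result to an arbitrary bit width: the unsigned case is a mask against the type's max for that width, and the signed case re-sign-extends. A standalone 64-bit sketch of the same idea; the explicit XOR-and-subtract sign extension here is the textbook trick, not literally what zig_wrap_i128 does with its hi/lo words.

/* Wrapping to an arbitrary width, mirroring the mask-with-maxInt
   approach of zig_wrap_u128 on a 64-bit container. */
#include <assert.h>
#include <stdint.h>

static uint64_t wrap_u(uint64_t val, unsigned bits) {
    uint64_t mask = bits == 64 ? UINT64_MAX : (UINT64_C(1) << bits) - 1;
    return val & mask;
}

static int64_t wrap_i(uint64_t val, unsigned bits) {
    uint64_t m = UINT64_C(1) << (bits - 1); /* sign bit of the narrow type */
    uint64_t wrapped = wrap_u(val, bits);
    return (int64_t)((wrapped ^ m) - m);    /* sign-extend from bit (bits - 1) */
}

int main(void) {
    assert(wrap_u(0x1FF, 8) == 0xFF); /* u8: 511 wraps to 255 */
    assert(wrap_i(0xFF, 8) == -1);    /* i8: 0xFF reads back as -1 */
    assert(wrap_i(0x7F, 8) == 127);
    return 0;
}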
-static inline int zig_ctzl(unsigned long value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- return __builtin_ctzl(value & zig_bit_mask(unsigned long, zig_type_bit_width));
+static inline zig_u128 zig_shlw_u128(zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+ return zig_wrap_u128(zig_shl_u128(lhs, rhs), bits);
}
-static inline int zig_ctzll(unsigned long value, uint8_t zig_type_bit_width) {
- if (value == 0) return zig_type_bit_width;
- return __builtin_ctzll(value & zig_bit_mask(unsigned long, zig_type_bit_width));
+static inline zig_i128 zig_shlw_i128(zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+ return zig_wrap_i128(zig_bitcast_i128(zig_shl_u128(zig_bitcast_u128(lhs), rhs)), bits);
}
-#define zig_ctz_u8 zig_ctz
-#define zig_ctz_i8 zig_ctz
-#define zig_ctz_u16 zig_ctz
-#define zig_ctz_i16 zig_ctz
-#define zig_ctz_u32 zig_ctzl
-#define zig_ctz_i32 zig_ctzl
-#define zig_ctz_u64 zig_ctzll
-#define zig_ctz_i64 zig_ctzll
+static inline zig_u128 zig_addw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ return zig_wrap_u128(zig_add_u128(lhs, rhs), bits);
+}
-static inline int zig_ctz_u128(uint128_t value, uint8_t zig_type_bit_width) {
- const uint128_t mask = zig_bit_mask(uint128_t, zig_type_bit_width);
- const uint64_t hi = (value & mask) >> 64;
- const uint64_t lo = (value & mask);
- return (lo != 0 ? __builtin_ctzll(lo) : 64 + (hi != 0 ? __builtin_ctzll(hi) : 64));
+static inline zig_i128 zig_addw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ return zig_wrap_i128(zig_bitcast_i128(zig_add_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-#define zig_ctz_i128 zig_ctz_u128
+static inline zig_u128 zig_subw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ return zig_wrap_u128(zig_sub_u128(lhs, rhs), bits);
+}
-static inline int zig_popcount(unsigned int value, uint8_t zig_type_bit_width) {
- return __builtin_popcount(value & zig_bit_mask(unsigned int, zig_type_bit_width));
+static inline zig_i128 zig_subw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ return zig_wrap_i128(zig_bitcast_i128(zig_sub_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-static inline int zig_popcountl(unsigned long value, uint8_t zig_type_bit_width) {
- return __builtin_popcountl(value & zig_bit_mask(unsigned long, zig_type_bit_width));
+static inline zig_u128 zig_mulw_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ return zig_wrap_u128(zig_mul_u128(lhs, rhs), bits);
}
-static inline int zig_popcountll(unsigned long value, uint8_t zig_type_bit_width) {
- return __builtin_popcountll(value & zig_bit_mask(unsigned long, zig_type_bit_width));
+static inline zig_i128 zig_mulw_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ return zig_wrap_i128(zig_bitcast_i128(zig_mul_u128(zig_bitcast_u128(lhs), zig_bitcast_u128(rhs))), bits);
}
-#define zig_popcount_u8 zig_popcount
-#define zig_popcount_i8 zig_popcount
-#define zig_popcount_u16 zig_popcount
-#define zig_popcount_i16 zig_popcount
-#define zig_popcount_u32 zig_popcountl
-#define zig_popcount_i32 zig_popcountl
-#define zig_popcount_u64 zig_popcountll
-#define zig_popcount_i64 zig_popcountll
+#if zig_has_int128
-static inline int zig_popcount_u128(uint128_t value, uint8_t zig_type_bit_width) {
- const uint128_t mask = zig_bit_mask(uint128_t, zig_type_bit_width);
- const uint64_t hi = (value & mask) >> 64;
- const uint64_t lo = (value & mask);
- return __builtin_popcountll(hi) + __builtin_popcountll(lo);
+static inline zig_bool zig_shlo_u128(zig_u128 *res, zig_u128 lhs, zig_u8 rhs, zig_u8 bits) {
+ *res = zig_shlw_u128(lhs, rhs, bits);
+ return zig_and_u128(lhs, zig_shl_u128(zig_maxInt_u128, bits - rhs)) != zig_as_u128(0, 0);
}
-#define zig_popcount_i128 zig_popcount_u128
+static inline zig_bool zig_shlo_i128(zig_i128 *res, zig_i128 lhs, zig_u8 rhs, zig_u8 bits) {
+ *res = zig_shlw_i128(lhs, rhs, bits);
+ zig_i128 mask = zig_bitcast_i128(zig_shl_u128(zig_maxInt_u128, bits - rhs - zig_as_u8(1)));
+ return zig_cmp_i128(zig_and_i128(lhs, mask), zig_as_i128(0, 0)) != 0 &&
+ zig_cmp_i128(zig_and_i128(lhs, mask), mask) != 0;
+}
-static inline bool zig_shlo_i8(int8_t lhs, int8_t rhs, int8_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_i8(lhs, bits) >= rhs) return false;
- *res &= UINT8_MAX >> (8 - bits);
- return true;
+static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_u128 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u128(full_res, bits);
+ return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+#else
+ *res = zig_addw_u128(lhs, rhs, bits);
+ return *res < lhs;
+#endif
}
-static inline bool zig_shlo_i16(int16_t lhs, int16_t rhs, int16_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_i16(lhs, bits) >= rhs) return false;
- *res &= UINT16_MAX >> (16 - bits);
- return true;
+zig_extern_c zig_i128 __addoti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
+static inline zig_bool zig_addo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+#if zig_has_builtin(add_overflow)
+ zig_i128 full_res;
+ zig_bool overflow = __builtin_add_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_i128 full_res = __addoti4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i128(full_res, bits);
+ return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
}
-static inline bool zig_shlo_i32(int32_t lhs, int32_t rhs, int32_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_i32(lhs, bits) >= rhs) return false;
- *res &= UINT32_MAX >> (32 - bits);
- return true;
+static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_u128 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u128(full_res, bits);
+ return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+#else
+ *res = zig_subw_u128(lhs, rhs, bits);
+ return *res > lhs;
+#endif
}
-static inline bool zig_shlo_i64(int64_t lhs, int64_t rhs, int64_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_i64(lhs, bits) >= rhs) return false;
- *res &= UINT64_MAX >> (64 - bits);
- return true;
+zig_extern_c zig_i128 __suboti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
+static inline zig_bool zig_subo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+#if zig_has_builtin(sub_overflow)
+ zig_i128 full_res;
+ zig_bool overflow = __builtin_sub_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_i128 full_res = __suboti4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i128(full_res, bits);
+ return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
}
-static inline bool zig_shlo_i128(int128_t lhs, int128_t rhs, int128_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_i128(lhs, bits) >= rhs) return false;
- *res &= UINT128_MAX >> (128 - bits);
- return true;
+static inline zig_bool zig_mulo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_u128 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+ *res = zig_wrap_u128(full_res, bits);
+ return overflow || full_res < zig_minInt(u128, bits) || full_res > zig_maxInt(u128, bits);
+#else
+ *res = zig_mulw_u128(lhs, rhs, bits);
+ return rhs != zig_as_u128(0, 0) && lhs > zig_maxInt(u128, bits) / rhs;
+#endif
}
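
The non-builtin path of zig_mulo_u128 detects unsigned multiply overflow with the classic division test: for rhs != 0, lhs * rhs exceeds the type's maximum exactly when lhs > max / rhs. The same check, standalone at 64 bits:

/* Division-based unsigned multiply-overflow test, as in the
   zig_mulo_u128 fallback, shown on uint64_t. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool mul_overflows_u64(uint64_t lhs, uint64_t rhs) {
    return rhs != 0 && lhs > UINT64_MAX / rhs;
}

int main(void) {
    assert(!mul_overflows_u64(0, UINT64_MAX));
    assert(!mul_overflows_u64(UINT64_MAX, 1));
    assert(mul_overflows_u64(UINT64_MAX, 2));
    assert(mul_overflows_u64(UINT64_C(1) << 32, UINT64_C(1) << 32));
    return 0;
}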
-static inline bool zig_shlo_u8(uint8_t lhs, uint8_t rhs, uint8_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_u8(lhs, bits) >= rhs) return false;
- *res &= UINT8_MAX >> (8 - bits);
- return true;
+zig_extern_c zig_i128 __muloti4(zig_i128 lhs, zig_i128 rhs, zig_c_int *overflow);
+static inline zig_bool zig_mulo_i128(zig_i128 *res, zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+#if zig_has_builtin(mul_overflow)
+ zig_i128 full_res;
+ zig_bool overflow = __builtin_mul_overflow(lhs, rhs, &full_res);
+#else
+ zig_c_int overflow_int;
+ zig_i128 full_res = __muloti4(lhs, rhs, &overflow_int);
+ zig_bool overflow = overflow_int != 0;
+#endif
+ *res = zig_wrap_i128(full_res, bits);
+ return overflow || full_res < zig_minInt(i128, bits) || full_res > zig_maxInt(i128, bits);
}
-static inline uint16_t zig_shlo_u16(uint16_t lhs, uint16_t rhs, uint16_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_u16(lhs, bits) >= rhs) return false;
- *res &= UINT16_MAX >> (16 - bits);
- return true;
+#else /* zig_has_int128 */
+
+static inline zig_bool zig_addo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 full_res;
+ zig_bool overflow = zig_addo_u64(&full_res.hi, lhs.hi, rhs.hi, zig_maxInt_u64);
+ overflow |= zig_addo_u64(&full_res.hi, full_res.hi, zig_addo_u64(&full_res.lo, lhs.lo, rhs.lo, zig_maxInt_u64), zig_maxInt_u64);
+ *res = zig_wrap_u128(full_res, bits);
+ return overflow || res->hi != full_res.hi || res->lo != full_res.lo;
}
-static inline uint32_t zig_shlo_u32(uint32_t lhs, uint32_t rhs, uint32_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_u32(lhs, bits) >= rhs) return false;
- *res &= UINT32_MAX >> (32 - bits);
- return true;
+static inline zig_bool zig_subo_u128(zig_u128 *res, zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 full_res;
+ zig_bool overflow = zig_subo_u64(&full_res.hi, lhs.hi, rhs.hi, zig_maxInt_u64);
+ overflow |= zig_subo_u64(&full_res.hi, full_res.hi, zig_subo_u64(&full_res.lo, lhs.lo, rhs.lo, zig_maxInt_u64), zig_maxInt_u64);
+ *res = zig_wrap_u128(full_res, bits);
+ return overflow || res->hi != full_res.hi || res->lo != full_res.lo;
}
-static inline uint64_t zig_shlo_u64(uint64_t lhs, uint64_t rhs, uint64_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_u64(lhs, bits) >= rhs) return false;
- *res &= UINT64_MAX >> (64 - bits);
- return true;
+#endif /* zig_has_int128 */
+
+static inline zig_u128 zig_shls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 res;
+ if (zig_cmp_u128(rhs, zig_as_u128(0, bits)) >= 0)
+ return zig_cmp_u128(lhs, zig_as_u128(0, 0)) != 0 ? zig_maxInt(u128, bits) : lhs;
+ return zig_shlo_u128(&res, lhs, (zig_u8)rhs, bits) ? zig_maxInt(u128, bits) : res;
}
-static inline uint128_t zig_shlo_u128(uint128_t lhs, uint128_t rhs, uint128_t *res, uint8_t bits) {
- *res = lhs << rhs;
- if (zig_clz_u128(lhs, bits) >= rhs) return false;
- *res &= UINT128_MAX >> (128 - bits);
- return true;
+static inline zig_i128 zig_shls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_i128 res;
+ if (zig_cmp_u128(zig_bitcast_u128(rhs), zig_as_u128(0, bits)) < 0 && !zig_shlo_i128(&res, lhs, rhs, bits)) return res;
+ return zig_cmp_i128(lhs, zig_as_i128(0, 0)) < 0 ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
}
-#define zig_sign_extend(T) \
- static inline T zig_sign_extend_##T(T value, uint8_t zig_type_bit_width) { \
- const T m = (T)1 << (T)(zig_type_bit_width - 1); \
- return (value ^ m) - m; \
- }
+static inline zig_u128 zig_adds_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 res;
+ return zig_addo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+}
-zig_sign_extend(uint8_t)
-zig_sign_extend(uint16_t)
-zig_sign_extend(uint32_t)
-zig_sign_extend(uint64_t)
-zig_sign_extend(uint128_t)
+static inline zig_i128 zig_adds_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_i128 res;
+ if (!zig_addo_i128(&res, lhs, rhs, bits)) return res;
+ return zig_cmp_i128(res, zig_as_i128(0, 0)) >= 0 ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+}
-#define zig_byte_swap_u(ZigTypeBits, CTypeBits) \
- static inline uint##CTypeBits##_t zig_byte_swap_u##ZigTypeBits(uint##CTypeBits##_t value, uint8_t zig_type_bit_width) { \
- return __builtin_bswap##CTypeBits(value) >> (CTypeBits - zig_type_bit_width); \
- }
+static inline zig_u128 zig_subs_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 res;
+ return zig_subo_u128(&res, lhs, rhs, bits) ? zig_minInt(u128, bits) : res;
+}
-#define zig_byte_swap_s(ZigTypeBits, CTypeBits) \
- static inline int##CTypeBits##_t zig_byte_swap_i##ZigTypeBits(int##CTypeBits##_t value, uint8_t zig_type_bit_width) { \
- const uint##CTypeBits##_t swapped = zig_byte_swap_u##ZigTypeBits(value, zig_type_bit_width); \
- return zig_sign_extend_uint##CTypeBits##_t(swapped, zig_type_bit_width); \
- }
+static inline zig_i128 zig_subs_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_i128 res;
+ if (!zig_subo_i128(&res, lhs, rhs, bits)) return res;
+ return zig_cmp_i128(res, zig_as_i128(0, 0)) >= 0 ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+}
+
+static inline zig_u128 zig_muls_u128(zig_u128 lhs, zig_u128 rhs, zig_u8 bits) {
+ zig_u128 res;
+ return zig_mulo_u128(&res, lhs, rhs, bits) ? zig_maxInt(u128, bits) : res;
+}
+
+static inline zig_i128 zig_muls_i128(zig_i128 lhs, zig_i128 rhs, zig_u8 bits) {
+ zig_i128 res;
+ if (!zig_mulo_i128(&res, lhs, rhs, bits)) return res;
+ return zig_cmp_i128(zig_xor_i128(lhs, rhs), zig_as_i128(0, 0)) < 0 ? zig_minInt(i128, bits) : zig_maxInt(i128, bits);
+}
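
The zig_adds_/zig_subs_/zig_muls_ helpers all follow the same shape: compute the wrapping result plus an overflow flag, then clamp. For signed addition the saturation direction can be read off the wrapped result's sign (overflow flips it); for multiplication it comes from the operand signs. A standalone 32-bit sketch of the signed-add case, assuming a compiler with GCC/Clang's __builtin_add_overflow:

/* Saturating signed add in the same overflow-then-clamp style as
   zig_adds_i128 (GCC/Clang builtin assumed). */
#include <assert.h>
#include <limits.h>
#include <stdint.h>

static int32_t adds_i32(int32_t lhs, int32_t rhs) {
    int32_t res;
    if (!__builtin_add_overflow(lhs, rhs, &res)) return res;
    /* on overflow the wrapped result has the "wrong" sign: a
       non-negative wrapped value means we underflowed, and vice versa */
    return res >= 0 ? INT32_MIN : INT32_MAX;
}

int main(void) {
    assert(adds_i32(INT32_MAX, 1) == INT32_MAX);
    assert(adds_i32(INT32_MIN, -1) == INT32_MIN);
    assert(adds_i32(40, 2) == 42);
    return 0;
}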
+
+static inline zig_u8 zig_clz_u128(zig_u128 val, zig_u8 bits) {
+ if (zig_hi_u128(val) != 0) return zig_clz_u64(zig_hi_u128(val), bits - zig_as_u8(64));
+ return zig_clz_u64(zig_lo_u128(val), zig_as_u8(64)) + (bits - zig_as_u8(64));
+}
+
+static inline zig_u8 zig_clz_i128(zig_i128 val, zig_u8 bits) {
+ return zig_clz_u128(zig_bitcast_u128(val), bits);
+}
+
+static inline zig_u8 zig_ctz_u128(zig_u128 val, zig_u8 bits) {
+ if (zig_lo_u128(val) != 0) return zig_ctz_u64(zig_lo_u128(val), zig_as_u8(64));
+ return zig_ctz_u64(zig_hi_u128(val), bits - zig_as_u8(64)) + zig_as_u8(64);
+}
+
+static inline zig_u8 zig_ctz_i128(zig_i128 val, zig_u8 bits) {
+ return zig_ctz_u128(zig_bitcast_u128(val), bits);
+}
+
+static inline zig_u8 zig_popcount_u128(zig_u128 val, zig_u8 bits) {
+ return zig_popcount_u64(zig_hi_u128(val), bits - zig_as_u8(64)) +
+ zig_popcount_u64(zig_lo_u128(val), zig_as_u8(64));
+}
+
+static inline zig_u8 zig_popcount_i128(zig_i128 val, zig_u8 bits) {
+ return zig_popcount_u128(zig_bitcast_u128(val), bits);
+}
+
+static inline zig_u128 zig_byte_swap_u128(zig_u128 val, zig_u8 bits) {
+ zig_u128 full_res;
+#if zig_has_builtin(bswap128)
+ full_res = __builtin_bswap128(val);
+#else
+ full_res = zig_as_u128(zig_byte_swap_u64(zig_lo_u128(val), zig_as_u8(64)),
+ zig_byte_swap_u64(zig_hi_u128(val), zig_as_u8(64)));
+#endif
+ return zig_shr_u128(full_res, zig_as_u8(128) - bits);
+}
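
zig_byte_swap_u128 swaps the full 128-bit container and then shifts right by (128 - bits) so that sub-container widths come out right-aligned. The same swap-then-shift pattern at 32-bit scale, assuming GCC/Clang's __builtin_bswap32:

/* Byte-swapping a 24-bit value held in a 32-bit container, mirroring
   zig_byte_swap_u128's swap-then-shift approach. */
#include <assert.h>
#include <stdint.h>

static uint32_t byte_swap_u24(uint32_t val) {
    return __builtin_bswap32(val) >> (32 - 24);
}

int main(void) {
    assert(byte_swap_u24(0x00123456u) == 0x00563412u);
    return 0;
}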
-#define zig_byte_swap(ZigTypeBits, CTypeBits) \
- zig_byte_swap_u(ZigTypeBits, CTypeBits) \
- zig_byte_swap_s(ZigTypeBits, CTypeBits)
-
-zig_byte_swap( 8, 16)
-zig_byte_swap(16, 16)
-zig_byte_swap(32, 32)
-zig_byte_swap(64, 64)
-
-static inline uint128_t zig_byte_swap_u128(uint128_t value, uint8_t zig_type_bit_width) {
- const uint128_t mask = zig_bit_mask(uint128_t, zig_type_bit_width);
- const uint128_t hi = __builtin_bswap64((uint64_t)(value >> 64));
- const uint128_t lo = __builtin_bswap64((uint64_t)value);
- return (((lo << 64 | hi) >> (128 - zig_type_bit_width))) & mask;
-}
-
-zig_byte_swap_s(128, 128)
-
-static const uint8_t zig_bit_reverse_lut[256] = {
- 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0,
- 0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
- 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4,
- 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
- 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc,
- 0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
- 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca,
- 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
- 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6,
- 0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
- 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1,
- 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
- 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9,
- 0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
- 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd,
- 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
- 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3,
- 0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
- 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7,
- 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
- 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf,
- 0x3f, 0xbf, 0x7f, 0xff
-};
-
-static inline uint8_t zig_bit_reverse_u8(uint8_t value, uint8_t zig_type_bit_width) {
- const uint8_t reversed = zig_bit_reverse_lut[value] >> (8 - zig_type_bit_width);
- return zig_sign_extend_uint8_t(reversed, zig_type_bit_width);
-}
-
-#define zig_bit_reverse_i8 zig_bit_reverse_u8
-
-static inline uint16_t zig_bit_reverse_u16(uint16_t value, uint8_t zig_type_bit_width) {
- const uint16_t swapped = zig_byte_swap_u16(value, zig_type_bit_width);
- const uint16_t reversed = (
- ((uint16_t)zig_bit_reverse_lut[(swapped >> 0x08) & 0xff] << 0x08) |
- ((uint16_t)zig_bit_reverse_lut[(swapped >> 0x00) & 0xff] << 0x00));
- return zig_sign_extend_uint16_t(
- reversed & zig_bit_mask(uint16_t, zig_type_bit_width),
- zig_type_bit_width);
-}
-
-#define zig_bit_reverse_i16 zig_bit_reverse_u16
-
-static inline uint32_t zig_bit_reverse_u32(uint32_t value, uint8_t zig_type_bit_width) {
- const uint32_t swapped = zig_byte_swap_u32(value, zig_type_bit_width);
- const uint32_t reversed = (
- ((uint32_t)zig_bit_reverse_lut[(swapped >> 0x18) & 0xff] << 0x18) |
- ((uint32_t)zig_bit_reverse_lut[(swapped >> 0x10) & 0xff] << 0x10) |
- ((uint32_t)zig_bit_reverse_lut[(swapped >> 0x08) & 0xff] << 0x08) |
- ((uint32_t)zig_bit_reverse_lut[(swapped >> 0x00) & 0xff] << 0x00));
- return zig_sign_extend_uint32_t(
- reversed & zig_bit_mask(uint32_t, zig_type_bit_width),
- zig_type_bit_width);
-}
-
-#define zig_bit_reverse_i32 zig_bit_reverse_u32
-
-static inline uint64_t zig_bit_reverse_u64(uint64_t value, uint8_t zig_type_bit_width) {
- const uint64_t swapped = zig_byte_swap_u64(value, zig_type_bit_width);
- const uint64_t reversed = (
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x38) & 0xff] << 0x38) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x30) & 0xff] << 0x30) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x28) & 0xff] << 0x28) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x20) & 0xff] << 0x20) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x18) & 0xff] << 0x18) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x10) & 0xff] << 0x10) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x08) & 0xff] << 0x08) |
- ((uint64_t)zig_bit_reverse_lut[(swapped >> 0x00) & 0xff] << 0x00));
- return zig_sign_extend_uint64_t(
- reversed & zig_bit_mask(uint64_t, zig_type_bit_width),
- zig_type_bit_width);
-}
-
-#define zig_bit_reverse_i64 zig_bit_reverse_u64
-
-static inline uint128_t zig_bit_reverse_u128(uint128_t value, uint8_t zig_type_bit_width) {
- const uint128_t swapped = zig_byte_swap_u128(value, zig_type_bit_width);
- const uint128_t reversed = (
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x78) & 0xff] << 0x78) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x70) & 0xff] << 0x70) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x68) & 0xff] << 0x68) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x60) & 0xff] << 0x60) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x58) & 0xff] << 0x58) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x50) & 0xff] << 0x50) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x48) & 0xff] << 0x48) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x40) & 0xff] << 0x40) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x38) & 0xff] << 0x38) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x30) & 0xff] << 0x30) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x28) & 0xff] << 0x28) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x20) & 0xff] << 0x20) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x18) & 0xff] << 0x18) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x10) & 0xff] << 0x10) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x08) & 0xff] << 0x08) |
- ((uint128_t)zig_bit_reverse_lut[(swapped >> 0x00) & 0xff] << 0x00));
- return zig_sign_extend_uint128_t(
- reversed & zig_bit_mask(uint128_t, zig_type_bit_width),
- zig_type_bit_width);
-}
-
-#define zig_bit_reverse_i128 zig_bit_reverse_u128
+static inline zig_i128 zig_byte_swap_i128(zig_i128 val, zig_u8 bits) {
+ return zig_bitcast_i128(zig_byte_swap_u128(zig_bitcast_u128(val), bits));
+}
+
+static inline zig_u128 zig_bit_reverse_u128(zig_u128 val, zig_u8 bits) {
+ return zig_shr_u128(zig_as_u128(zig_bit_reverse_u64(zig_lo_u128(val), zig_as_u8(64)),
+ zig_bit_reverse_u64(zig_hi_u128(val), zig_as_u8(64))),
+ zig_as_u8(128) - bits);
+}
+
+static inline zig_i128 zig_bit_reverse_i128(zig_i128 val, zig_u8 bits) {
+ return zig_bitcast_i128(zig_bit_reverse_u128(zig_bitcast_u128(val), bits));
+}
+
+/* ========================= Floating Point Routines ========================= */
+
+static inline zig_f32 zig_bitcast_f32_u32(zig_u32 arg) {
+ zig_f32 dest;
+ memcpy(&dest, &arg, sizeof dest);
+ return dest;
+}
+
+static inline zig_f64 zig_bitcast_f64_u64(zig_u64 arg) {
+ zig_f64 dest;
+ memcpy(&dest, &arg, sizeof dest);
+ return dest;
+}
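
zig_bitcast_f32_u32/zig_bitcast_f64_u64 reinterpret bits through memcpy rather than a pointer cast or union read: a fixed-size memcpy avoids strict-aliasing violations and compilers lower it to a plain register move. A standalone round-trip check of the same pattern:

/* memcpy-based bit casting, as in zig_bitcast_f64_u64. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static double bitcast_f64_u64(uint64_t bits) {
    double dest;
    memcpy(&dest, &bits, sizeof dest);
    return dest;
}

static uint64_t bitcast_u64_f64(double val) {
    uint64_t dest;
    memcpy(&dest, &val, sizeof dest);
    return dest;
}

int main(void) {
    assert(bitcast_f64_u64(UINT64_C(0x3FF0000000000000)) == 1.0); /* IEEE-754 encoding of 1.0 */
    assert(bitcast_u64_f64(1.0) == UINT64_C(0x3FF0000000000000));
    return 0;
}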
static inline float zig_div_truncf(float numerator, float denominator) {
return __builtin_truncf(numerator / denominator);
@@ -1562,17 +1459,6 @@ static inline long double zig_div_truncl(long double numerator, long double deno
#define zig_div_floor_f80 zig_div_floorl
#define zig_div_floor_f128 zig_div_floorl
-#define zig_div_floor_u8 zig_div_floorf
-#define zig_div_floor_i8 zig_div_floorf
-#define zig_div_floor_u16 zig_div_floorf
-#define zig_div_floor_i16 zig_div_floorf
-#define zig_div_floor_u32 zig_div_floor
-#define zig_div_floor_i32 zig_div_floor
-#define zig_div_floor_u64 zig_div_floor
-#define zig_div_floor_i64 zig_div_floor
-#define zig_div_floor_u128 zig_div_floorl
-#define zig_div_floor_i128 zig_div_floorl
-
static inline float zig_modf(float numerator, float denominator) {
return (numerator - (zig_div_floorf(numerator, denominator) * denominator));
}
@@ -1590,19 +1476,3 @@ static inline long double zig_modl(long double numerator, long double denominato
#define zig_mod_f64 zig_mod
#define zig_mod_f80 zig_modl
#define zig_mod_f128 zig_modl
-
-#define zig_mod_int(ZigType, CType) \
- static inline CType zig_mod_##ZigType(CType numerator, CType denominator) { \
- return (numerator - (zig_div_floor_##ZigType(numerator, denominator) * denominator)); \
- }
-
-zig_mod_int( u8, uint8_t)
-zig_mod_int( i8, int8_t)
-zig_mod_int( u16, uint16_t)
-zig_mod_int( i16, int16_t)
-zig_mod_int( u32, uint32_t)
-zig_mod_int( i32, int32_t)
-zig_mod_int( u64, uint64_t)
-zig_mod_int( i64, int64_t)
-zig_mod_int(u128, uint128_t)
-zig_mod_int(i128, int128_t)
src/codegen/c.zig
@@ -72,6 +72,12 @@ const ValueRenderLocation = enum {
Other,
};
+const BuiltinInfo = enum {
+ None,
+ Range,
+ Bits,
+};
+
/// TODO make this not cut off at 128 bytes
fn formatTypeAsCIdentifier(
data: FormatTypeAsCIdentContext,
@@ -323,6 +329,33 @@ pub const Function = struct {
}
}
+ fn writeCValueMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
+ switch (c_value) {
+ .constant => |inst| {
+ const ty = f.air.typeOf(inst);
+ const val = f.air.value(inst).?;
+ try f.object.dg.renderValue(w, ty, val, .Other);
+ try w.writeByte('.');
+ return f.writeCValue(w, member, .Other);
+ },
+ else => return f.object.dg.writeCValueMember(w, c_value, member),
+ }
+ }
+
+ fn writeCValueDerefMember(f: *Function, w: anytype, c_value: CValue, member: CValue) !void {
+ switch (c_value) {
+ .constant => |inst| {
+ const ty = f.air.typeOf(inst);
+ const val = f.air.value(inst).?;
+ try w.writeByte('(');
+ try f.object.dg.renderValue(w, ty, val, .Other);
+ try w.writeAll(")->");
+ return f.writeCValue(w, member, .Other);
+ },
+ else => return f.object.dg.writeCValueDerefMember(w, c_value, member),
+ }
+ }
+
fn fail(f: *Function, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } {
return f.object.dg.fail(format, args);
}
@@ -339,16 +372,63 @@ pub const Function = struct {
return f.object.dg.fmtIntLiteral(ty, val);
}
- fn renderFloatFnName(f: *Function, fn_name: []const u8, float_ty: Type) !void {
+ fn renderTypeForBuiltinFnName(f: *Function, writer: anytype, ty: Type) !void {
+ const target = f.object.dg.module.getTarget();
+ const c_bits = if (ty.isInt()) c_bits: {
+ const int_info = ty.intInfo(target);
+ try writer.writeByte(signAbbrev(int_info.signedness));
+ break :c_bits toCIntBits(int_info.bits) orelse
+ return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
+ } else if (ty.isRuntimeFloat()) c_bits: {
+ try writer.writeByte('f');
+ break :c_bits ty.floatBits(target);
+ } else return f.fail("TODO: CBE: implement renderTypeForBuiltinFnName for type {}", .{
+ ty.fmt(f.object.dg.module),
+ });
+ try writer.print("{d}", .{c_bits});
+ }
+
+ fn renderBuiltinInfo(f: *Function, writer: anytype, ty: Type, info: BuiltinInfo) !void {
+ const target = f.object.dg.module.getTarget();
+ switch (info) {
+ .None => {},
+ .Range => {
+ var arena = std.heap.ArenaAllocator.init(f.object.dg.module.gpa);
+ defer arena.deinit();
+
+ const expected_contents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
+ var stack align(@alignOf(expected_contents)) =
+ std.heap.stackFallback(@sizeOf(expected_contents), arena.allocator());
+
+ const int_info = ty.intInfo(target);
+ if (int_info.signedness == .signed) {
+ const min_val = try ty.minInt(stack.get(), target);
+ try writer.print(", {x}", .{try f.fmtIntLiteral(ty, min_val)});
+ }
+
+ const max_val = try ty.maxInt(stack.get(), target);
+ try writer.print(", {x}", .{try f.fmtIntLiteral(ty, max_val)});
+ },
+ .Bits => {
+ var bits_pl = Value.Payload.U64{
+ .base = .{ .tag = .int_u64 },
+ .data = ty.bitSize(target),
+ };
+ const bits_val = Value.initPayload(&bits_pl.base);
+ try writer.print(", {}", .{try f.fmtIntLiteral(Type.u8, bits_val)});
+ },
+ }
+ }
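
Taken together, renderTypeForBuiltinFnName and renderBuiltinInfo decide the suffix and trailing arguments of the zig_* helper the backend emits: the suffix is the sign abbreviation plus the C int width (or 'f' plus the float width), .Range appends the type's min (signed only) and max literals, and .Bits appends the bit width. A hedged C sketch of the resulting call shape for a wrapping add on a Zig u7; the helper below is a hand-written 8-bit analogue of the 128-bit helpers in zig.h, not the backend's actual output.

/* Illustrative stand-in: a zig_addw_* style helper whose trailing ", 7"
   argument corresponds to the .Bits case of renderBuiltinInfo. */
#include <assert.h>
#include <stdint.h>

static uint8_t zig_wrap_u8(uint8_t val, uint8_t bits) {
    return val & (uint8_t)((1u << bits) - 1u); /* works for bits <= 8 */
}

static uint8_t zig_addw_u8(uint8_t lhs, uint8_t rhs, uint8_t bits) {
    return zig_wrap_u8((uint8_t)(lhs + rhs), bits);
}

int main(void) {
    /* what a wrapping add on a u7 could lower to */
    assert(zig_addw_u8(127, 1, 7) == 0);
    assert(zig_addw_u8(100, 20, 7) == 120);
    return 0;
}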
+
+ fn renderFloatFnName(f: *Function, writer: anytype, operation: []const u8, float_ty: Type) !void {
const target = f.object.dg.module.getTarget();
const float_bits = float_ty.floatBits(target);
const is_longdouble = float_bits == CType.longdouble.sizeInBits(target);
- const writer = f.object.writer();
try writer.writeAll("__");
if (is_longdouble or float_bits != 80) {
try writer.writeAll("builtin_");
}
- try writer.writeAll(fn_name);
+ try writer.writeAll(operation);
if (is_longdouble) {
try writer.writeByte('l');
} else switch (float_bits) {
@@ -558,9 +638,8 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
if (val.isUndefDeep()) {
switch (ty.zigTypeTag()) {
- // Using '{}' for integer and floats seemed to error C compilers (both GCC and Clang)
- // with 'error: expected expression' (including when built with 'zig cc')
- .Bool => return writer.writeAll("false"),
+ // bool b = 0xaa; evals to true, but memcpy(&b, 0xaa, 1); evals to false.
+ .Bool => return dg.renderValue(writer, ty, Value.@"false", location),
.Int, .Enum, .ErrorSet => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, val)}),
.Float => switch (ty.tag()) {
.f32 => return writer.print("zig_bitcast_f32_u32({x})", .{
@@ -839,15 +918,14 @@ pub const DeclGen = struct {
},
}
},
- .Bool => return writer.print("{}", .{val.toBool()}),
+ .Bool => return writer.print("zig_{}", .{val.toBool()}),
.Optional => {
var opt_buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&opt_buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- const is_null = val.castTag(.opt_payload) == null;
- return writer.print("{}", .{is_null});
- }
+ const is_null_val = Value.makeBool(val.tag() == .null_value);
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ return dg.renderValue(writer, Type.bool, is_null_val, location);
if (ty.optionalReprIsPayload()) {
const payload_val = if (val.castTag(.opt_payload)) |pl| pl.data else val;
@@ -864,21 +942,17 @@ pub const DeclGen = struct {
try writer.writeAll("{ .payload = ");
try dg.renderValue(writer, payload_ty, payload_val, .Initializer);
- try writer.print(", .is_null = {} }}", .{val.tag() == .null_value});
+ try writer.writeAll(", .is_null = ");
+ try dg.renderValue(writer, Type.bool, is_null_val, .Initializer);
+ try writer.writeAll(" }");
},
.ErrorSet => {
- switch (val.tag()) {
- .@"error" => {
- const payload = val.castTag(.@"error").?;
- // error values will be #defined at the top of the file
- return writer.print("zig_error_{s}", .{fmtIdent(payload.data.name)});
- },
- else => {
- // In this case we are rendering an error union which has a
- // 0 bits payload.
- return writer.writeByte('0');
- },
- }
+ const error_name = if (val.castTag(.@"error")) |error_pl|
+ error_pl.data.name
+ else
+ dg.module.error_name_list.items[0];
+ // Error values are already defined by genErrDecls.
+ try writer.print("zig_error_{}", .{fmtIdent(error_name)});
},
.ErrorUnion => {
const error_ty = ty.errorUnionSet();
@@ -897,7 +971,7 @@ pub const DeclGen = struct {
}
const payload_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.undef;
- const error_val = if (val.tag() == .eu_payload) Value.zero else val;
+ const error_val = if (val.errorUnionIsPayload()) Value.zero else val;
try writer.writeAll("{ .payload = ");
try dg.renderValue(writer, payload_ty, payload_val, .Initializer);
@@ -1026,24 +1100,14 @@ pub const DeclGen = struct {
fn renderFunctionSignature(dg: *DeclGen, w: anytype, kind: TypedefKind) !void {
const fn_info = dg.decl.ty.fnInfo();
- if (fn_info.cc == .Naked) {
- try w.writeAll("ZIG_NAKED ");
- }
- if (dg.decl.val.castTag(.function)) |func_payload| {
- const func: *Module.Fn = func_payload.data;
- if (func.is_cold) {
- try w.writeAll("ZIG_COLD ");
- }
- }
- if (fn_info.return_type.hasRuntimeBits()) {
- try dg.renderType(w, fn_info.return_type, kind);
- } else if (fn_info.return_type.isError()) {
- try dg.renderType(w, Type.anyerror, kind);
- } else if (fn_info.return_type.zigTypeTag() == .NoReturn) {
- try w.writeAll("zig_noreturn void");
- } else {
- try w.writeAll("void");
- }
+ if (fn_info.cc == .Naked) try w.writeAll("zig_naked ");
+ if (dg.decl.val.castTag(.function)) |func_payload|
+ if (func_payload.data.is_cold) try w.writeAll("zig_cold ");
+ const ret_ty = fn_info.return_type;
+ try dg.renderType(w, if (ret_ty.tag() == .noreturn or ret_ty.hasRuntimeBitsIgnoreComptime())
+ ret_ty
+ else
+ Type.void, kind);
try w.writeByte(' ');
try dg.renderDeclName(w, dg.decl_index);
try w.writeByte('(');
@@ -1051,9 +1115,7 @@ pub const DeclGen = struct {
var index: usize = 0;
for (fn_info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
- if (index > 0) {
- try w.writeAll(", ");
- }
+ if (index > 0) try w.writeAll(", ");
const name = CValue{ .arg = index };
try dg.renderTypeAndName(w, param_type, name, .Const, 0, kind);
index += 1;
@@ -1063,7 +1125,7 @@ pub const DeclGen = struct {
if (index > 0) try w.writeAll(", ");
try w.writeAll("...");
} else if (index == 0) {
- try w.writeAll("void");
+ try dg.renderType(w, Type.void, kind);
}
try w.writeByte(')');
}
@@ -1102,7 +1164,7 @@ pub const DeclGen = struct {
if (params_written != 0) try bw.writeAll(", ");
try bw.writeAll("...");
} else if (params_written == 0) {
- try bw.writeAll("void");
+ try dg.renderType(bw, Type.void, .Forward);
}
try bw.writeAll(");\n");
@@ -1126,14 +1188,18 @@ pub const DeclGen = struct {
defer buffer.deinit();
const bw = buffer.writer();
- try bw.writeAll("typedef struct { ");
+ var ptr_ty_buf: Type.SlicePtrFieldTypeBuffer = undefined;
+ const ptr_ty = t.slicePtrFieldType(&ptr_ty_buf);
+ const ptr_name = CValue{ .identifier = "ptr" };
+ const len_ty = Type.usize;
+ const len_name = CValue{ .identifier = "len" };
- var ptr_type_buf: Type.SlicePtrFieldTypeBuffer = undefined;
- const ptr_type = t.slicePtrFieldType(&ptr_type_buf);
- const ptr_name = CValue{ .bytes = "ptr" };
- try dg.renderTypeAndName(bw, ptr_type, ptr_name, .Mut, 0, .Complete);
+ try bw.writeAll("typedef struct {\n ");
+ try dg.renderTypeAndName(bw, ptr_ty, ptr_name, .Mut, 0, .Complete);
+ try bw.writeAll(";\n ");
+ try dg.renderTypeAndName(bw, len_ty, len_name, .Mut, 0, .Complete);
- try bw.writeAll("; size_t len; } ");
+ try bw.writeAll(";\n} ");
const name_begin = buffer.items.len;
try bw.print("zig_{c}_{}", .{
@as(u8, if (t.isConstPtr()) 'L' else 'M'),
@@ -1339,27 +1405,31 @@ pub const DeclGen = struct {
}
fn renderErrorUnionTypedef(dg: *DeclGen, t: Type) error{ OutOfMemory, AnalysisFail }![]const u8 {
- const payload_ty = t.errorUnionPayload();
assert(t.errorUnionSet().tag() == .anyerror);
var buffer = std.ArrayList(u8).init(dg.typedefs.allocator);
defer buffer.deinit();
const bw = buffer.writer();
- const payload_name = CValue{ .bytes = "payload" };
+ const payload_ty = t.errorUnionPayload();
+ const payload_name = CValue{ .identifier = "payload" };
+ const error_ty = t.errorUnionSet();
+ const error_name = CValue{ .identifier = "error" };
+
const target = dg.module.getTarget();
const payload_align = payload_ty.abiAlignment(target);
- const error_align = Type.anyerror.abiAlignment(target);
+ const error_align = error_ty.abiAlignment(target);
+ try bw.writeAll("typedef struct {\n ");
if (error_align > payload_align) {
- try bw.writeAll("typedef struct { ");
try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0, .Complete);
- try bw.writeAll("; uint16_t error; } ");
+ try bw.writeAll(";\n ");
+ try dg.renderTypeAndName(bw, error_ty, error_name, .Mut, 0, .Complete);
} else {
- try bw.writeAll("typedef struct { uint16_t error; ");
+ try dg.renderTypeAndName(bw, error_ty, error_name, .Mut, 0, .Complete);
+ try bw.writeAll(";\n ");
try dg.renderTypeAndName(bw, payload_ty, payload_name, .Mut, 0, .Complete);
- try bw.writeAll("; } ");
}
-
+ try bw.writeAll(";\n} ");
const name_begin = buffer.items.len;
try bw.print("zig_E_{}", .{typeToCIdentifier(payload_ty, dg.module)});
const name_end = buffer.items.len;
@@ -1413,11 +1483,11 @@ pub const DeclGen = struct {
defer buffer.deinit();
const bw = buffer.writer();
- try bw.writeAll("typedef struct { ");
- const payload_name = CValue{ .bytes = "payload" };
- try dg.renderTypeAndName(bw, child_type, payload_name, .Mut, 0, .Complete);
- try bw.writeAll("; bool is_null; } ");
-
+ try bw.writeAll("typedef struct {\n ");
+ try dg.renderTypeAndName(bw, child_type, .{ .identifier = "payload" }, .Mut, 0, .Complete);
+ try bw.writeAll(";\n ");
+ try dg.renderTypeAndName(bw, Type.bool, .{ .identifier = "is_null" }, .Mut, 0, .Complete);
+ try bw.writeAll("; } ");
const name_begin = buffer.items.len;
try bw.print("zig_Q_{}", .{typeToCIdentifier(child_type, dg.module)});
const name_end = buffer.items.len;
@@ -1486,51 +1556,20 @@ pub const DeclGen = struct {
const target = dg.module.getTarget();
switch (t.zigTypeTag()) {
- .NoReturn, .Void => try w.writeAll("void"),
- .Bool => try w.writeAll("bool"),
- .Int => {
- switch (t.tag()) {
- .u1, .u8 => try w.writeAll("uint8_t"),
- .i8 => try w.writeAll("int8_t"),
- .u16 => try w.writeAll("uint16_t"),
- .i16 => try w.writeAll("int16_t"),
- .u32 => try w.writeAll("uint32_t"),
- .i32 => try w.writeAll("int32_t"),
- .u64 => try w.writeAll("uint64_t"),
- .i64 => try w.writeAll("int64_t"),
- .u128 => try w.writeAll("uint128_t"),
- .i128 => try w.writeAll("int128_t"),
- .usize => try w.writeAll("uintptr_t"),
- .isize => try w.writeAll("intptr_t"),
- .c_short => try w.writeAll("short"),
- .c_ushort => try w.writeAll("unsigned short"),
- .c_int => try w.writeAll("int"),
- .c_uint => try w.writeAll("unsigned int"),
- .c_long => try w.writeAll("long"),
- .c_ulong => try w.writeAll("unsigned long"),
- .c_longlong => try w.writeAll("long long"),
- .c_ulonglong => try w.writeAll("unsigned long long"),
- .u29, .int_signed, .int_unsigned => {
- const info = t.intInfo(target);
- const sign_prefix = switch (info.signedness) {
- .signed => "",
- .unsigned => "u",
- };
- const c_bits = toCIntBits(info.bits) orelse
- return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- try w.print("{s}int{d}_t", .{ sign_prefix, c_bits });
- },
- else => unreachable,
- }
- },
- .Float => {
- switch (t.tag()) {
- .f32 => try w.writeAll("float"),
- .f64 => try w.writeAll("double"),
- .c_longdouble => try w.writeAll("long double"),
- .f16 => return dg.fail("TODO: C backend: implement float type f16", .{}),
- .f128 => return dg.fail("TODO: C backend: implement float type f128", .{}),
- else => unreachable,
+ .NoReturn, .Void, .Bool, .Int, .Float, .ErrorSet => |tag| {
+ const is_named = switch (tag) {
+ .Int => t.isNamedInt(),
+ .ErrorSet => false,
+ else => true,
+ };
+ if (is_named) {
+ try w.writeAll("zig_");
+ try t.print(w, dg.module);
+ } else {
+ const info = t.intInfo(target);
+ const c_bits = toCIntBits(info.bits) orelse
+ return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
+ try w.print("zig_{c}{d}", .{ signAbbrev(info.signedness), c_bits });
}
},
.Pointer => {
@@ -1564,9 +1603,10 @@ pub const DeclGen = struct {
// u8 and i8 produce unsigned char and signed char respectively,
// which in C are (not very usefully) different than char.
try w.writeAll("char");
- } else {
- try dg.renderType(w, child_ty, .Forward);
- }
+ } else try dg.renderType(w, switch (child_ty.tag()) {
+ .anyopaque => Type.void,
+ else => child_ty,
+ }, .Forward);
if (t.isConstPtr()) try w.writeAll(" const");
if (t.isVolatilePtr()) try w.writeAll(" volatile");
return w.writeAll(" *");
@@ -1587,29 +1627,22 @@ pub const DeclGen = struct {
var opt_buf: Type.Payload.ElemType = undefined;
const child_type = t.optionalChild(&opt_buf);
- if (!child_type.hasRuntimeBitsIgnoreComptime()) {
- return w.writeAll("bool");
- }
+ if (!child_type.hasRuntimeBitsIgnoreComptime())
+ return dg.renderType(w, Type.bool, kind);
- if (t.optionalReprIsPayload()) {
- return dg.renderType(w, child_type, .Complete);
- }
+ if (t.optionalReprIsPayload())
+ return dg.renderType(w, child_type, kind);
const name = dg.getTypedefName(t) orelse
try dg.renderOptionalTypedef(t, child_type);
return w.writeAll(name);
},
- .ErrorSet => {
- comptime assert(Type.anyerror.abiSize(builtin.target) == 2);
- return w.writeAll("uint16_t");
- },
.ErrorUnion => {
const payload_ty = t.errorUnionPayload();
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- return dg.renderType(w, Type.anyerror, .Complete);
- }
+ if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ return dg.renderType(w, Type.anyerror, kind);
var error_union_pl = Type.Payload.ErrorUnion{
.data = .{ .error_set = Type.anyerror, .payload = payload_ty },
@@ -1652,7 +1685,6 @@ pub const DeclGen = struct {
try dg.renderType(w, int_tag_ty, kind);
},
.Opaque => switch (t.tag()) {
- .anyopaque => try w.writeAll("void"),
.@"opaque" => {
const name = dg.getTypedefName(t) orelse
try dg.renderOpaqueTypedef(t);
@@ -1695,13 +1727,8 @@ pub const DeclGen = struct {
/// | `renderTypeAndName` | "uint8_t *name" | "uint8_t *name[10]" |
/// | `renderType` | "uint8_t *" | "zig_A_uint8_t_10" |
///
- fn renderTypecast(
- dg: *DeclGen,
- w: anytype,
- ty: Type,
- ) error{ OutOfMemory, AnalysisFail }!void {
- const name = CValue{ .bytes = "" };
- return renderTypeAndName(dg, w, ty, name, .Mut, 0, .Complete);
+ fn renderTypecast(dg: *DeclGen, w: anytype, ty: Type) error{ OutOfMemory, AnalysisFail }!void {
+ return renderTypeAndName(dg, w, ty, .{ .bytes = "" }, .Mut, 0, .Complete);
}
/// Renders a type and name in field declaration/definition format.
@@ -1735,8 +1762,9 @@ pub const DeclGen = struct {
render_ty = render_ty.elemType();
}
- if (alignment != 0)
- try w.print("ZIG_ALIGN({}) ", .{alignment});
+ if (alignment != 0 and alignment > ty.abiAlignment(dg.module.getTarget())) {
+ try w.print("zig_align({}) ", .{alignment});
+ }
try dg.renderType(w, render_ty, kind);
const const_prefix = switch (mutability) {
@@ -1792,13 +1820,16 @@ pub const DeclGen = struct {
try buffer.appendSlice(";\n return (");
try dg.renderTypecast(bw, name_slice_ty);
try bw.print("){{{}, {}}};\n", .{
- fmtIdent("name"),
- try dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent("name"), try dg.fmtIntLiteral(Type.usize, len_val),
});
try buffer.appendSlice(" }\n");
}
- try buffer.appendSlice(" }\n while (true) zig_breakpoint();\n}\n");
+ try buffer.appendSlice(" }\n while (");
+ try dg.renderValue(bw, Type.bool, Value.@"true", .Other);
+ try buffer.appendSlice(") ");
+ _ = try airBreakpoint(bw);
+ try buffer.appendSlice("}\n");
const rendered = buffer.toOwnedSlice();
errdefer dg.typedefs.allocator.free(rendered);
@@ -1874,6 +1905,27 @@ pub const DeclGen = struct {
}
}
+ fn writeCValueMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void {
+ try dg.writeCValue(writer, c_value);
+ try writer.writeByte('.');
+ try dg.writeCValue(writer, member);
+ }
+
+ fn writeCValueDerefMember(dg: *DeclGen, writer: anytype, c_value: CValue, member: CValue) !void {
+ switch (c_value) {
+ .none, .constant, .undef => unreachable,
+ .local, .arg, .decl, .identifier, .bytes => {
+ try dg.writeCValue(writer, c_value);
+ try writer.writeAll("->");
+ },
+ .local_ref, .decl_ref => {
+ try dg.writeCValueDeref(writer, c_value);
+ try writer.writeByte('.');
+ },
+ }
+ try dg.writeCValue(writer, member);
+ }
+
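The two helpers above centralize member access: `writeCValueMember` always joins with a dot, while `writeCValueDerefMember` chooses between `->` and collapsing an address-of value back to a dot. A minimal C sketch of the emitted forms (struct and names are hypothetical):

    #include <stdint.h>
    struct eu { uint32_t payload; };
    static uint32_t member_access_shapes(struct eu v, struct eu *p) {
        uint32_t a = v.payload;   /* writeCValueMember(v, "payload")                          */
        uint32_t b = p->payload;  /* writeCValueDerefMember(p, "payload") on a pointer value  */
        /* a .local_ref/.decl_ref prints as "&v", so its deref-member collapses
           back to "v.payload" instead of emitting "(&v)->payload"                */
        return a + b;
    }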
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: Decl.Index) !void {
const decl = dg.module.declPtr(decl_index);
dg.module.markDeclAlive(decl);
@@ -1971,8 +2023,7 @@ pub fn genErrDecls(o: *Object) !void {
const len_val = Value.initPayload(&len_pl.base);
try writer.print("{{" ++ name_prefix ++ "_{}, {}}}", .{
- fmtIdent(name),
- try o.dg.fmtIntLiteral(Type.usize, len_val),
+ fmtIdent(name), try o.dg.fmtIntLiteral(Type.usize, len_val),
});
}
try writer.writeAll("};\n");
@@ -1989,7 +2040,7 @@ pub fn genFunc(f: *Function) !void {
const is_global = o.dg.module.decl_exports.contains(f.func.owner_decl);
const fwd_decl_writer = o.dg.fwd_decl.writer();
- try fwd_decl_writer.writeAll(if (is_global) "ZIG_EXTERN_C " else "static ");
+ try fwd_decl_writer.writeAll(if (is_global) "zig_extern_c " else "static ");
try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward);
try fwd_decl_writer.writeAll(";\n");
@@ -2028,7 +2079,7 @@ pub fn genDecl(o: *Object) !void {
};
if (tv.val.tag() == .extern_fn) {
const fwd_decl_writer = o.dg.fwd_decl.writer();
- try fwd_decl_writer.writeAll("ZIG_EXTERN_C ");
+ try fwd_decl_writer.writeAll("zig_extern_c ");
try o.dg.renderFunctionSignature(fwd_decl_writer, .Forward);
try fwd_decl_writer.writeAll(";\n");
} else if (tv.val.castTag(.variable)) |var_payload| {
@@ -2036,7 +2087,7 @@ pub fn genDecl(o: *Object) !void {
const is_global = o.dg.declIsGlobal(tv) or variable.is_extern;
const fwd_decl_writer = o.dg.fwd_decl.writer();
if (is_global) {
- try fwd_decl_writer.writeAll("ZIG_EXTERN_C ");
+ try fwd_decl_writer.writeAll("zig_extern_c ");
}
if (variable.is_threadlocal) {
try fwd_decl_writer.writeAll("zig_threadlocal ");
@@ -2096,7 +2147,7 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void {
.Fn => {
const is_global = dg.declIsGlobal(tv);
if (is_global) {
- try writer.writeAll("ZIG_EXTERN_C ");
+ try writer.writeAll("zig_extern_c ");
try dg.renderFunctionSignature(writer, .Complete);
try dg.fwd_decl.appendSlice(";\n");
}
@@ -2124,7 +2175,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.const_ty => unreachable, // excluded from function bodies
.arg => airArg(f),
- .breakpoint => try airBreakpoint(f),
+ .breakpoint => try airBreakpoint(f.object.writer()),
.ret_addr => try airRetAddr(f, inst),
.frame_addr => try airFrameAddress(f, inst),
.unreach => try airUnreach(f),
@@ -2135,10 +2186,10 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
// TODO use a different strategy for add, sub, mul, div
// that communicates to the optimizer that wrapping is UB.
- .add => try airBinOp(f, inst, "+"),
- .sub => try airBinOp(f, inst, "-"),
- .mul => try airBinOp(f, inst, "*"),
- .div_float, .div_exact => try airBinOp(f, inst, "/"),
+ .add => try airBinOp(f, inst, "+", "add", .None),
+ .sub => try airBinOp(f, inst, "-", "sub", .None),
+ .mul => try airBinOp(f, inst, "*", "mul", .None),
+ .div_float, .div_exact => try airBinOp(f, inst, "/", "div_trunc", .None),
.rem => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
@@ -2146,9 +2197,9 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
break :blk if (lhs_ty.isInt())
- try airBinOp(f, inst, "%")
+ try airBinOp(f, inst, "%", "rem", .None)
else
- try airBinFloatOp(f, inst, "fmod"); // yes, @rem() => fmod()
+ try airBinFloatOp(f, inst, "fmod");
},
.div_trunc => blk: {
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
@@ -2156,21 +2207,21 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
// For binary operations @TypeOf(lhs)==@TypeOf(rhs),
// so we only check one.
break :blk if (lhs_ty.isInt())
- try airBinOp(f, inst, "/")
+ try airBinOp(f, inst, "/", "div_trunc", .None)
else
- try airBinOpBuiltinCall(f, inst, "div_trunc");
+ try airBinBuiltinCall(f, inst, "div_trunc", .None);
},
- .div_floor => try airBinOpBuiltinCall(f, inst, "div_floor"),
- .mod => try airBinOpBuiltinCall(f, inst, "mod"),
+ .div_floor => try airBinBuiltinCall(f, inst, "div_floor", .None),
+ .mod => try airBinBuiltinCall(f, inst, "mod", .None),
- .addwrap => try airWrapOp(f, inst, "+", "add"),
- .subwrap => try airWrapOp(f, inst, "-", "sub"),
- .mulwrap => try airWrapOp(f, inst, "*", "mul"),
+ .addwrap => try airBinBuiltinCall(f, inst, "addw", .Bits),
+ .subwrap => try airBinBuiltinCall(f, inst, "subw", .Bits),
+ .mulwrap => try airBinBuiltinCall(f, inst, "mulw", .Bits),
- .add_sat => try airSatOp(f, inst, "add"),
- .sub_sat => try airSatOp(f, inst, "sub"),
- .mul_sat => try airSatOp(f, inst, "mul"),
- .shl_sat => try airSatOp(f, inst, "shl"),
+ .add_sat => try airBinBuiltinCall(f, inst, "adds", .Bits),
+ .sub_sat => try airBinBuiltinCall(f, inst, "subs", .Bits),
+ .mul_sat => try airBinBuiltinCall(f, inst, "muls", .Bits),
+ .shl_sat => try airBinBuiltinCall(f, inst, "shls", .Bits),
.neg => try airNeg(f, inst),
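The wrapping and saturating arithmetic above no longer open-codes min/max clamping (see the deleted airWrapOp/airSatOp further down); both now go through airBinBuiltinCall, which names a zig.h helper after the operation and operand type and, for `.Bits`, appends the operand's bit width. A sketch for u8 operands, assuming the trailing argument is the bit width and that `zig_addw_u8`/`zig_adds_u8` are provided by zig.h:

    uint8_t a = 200, b = 100;
    uint8_t w = zig_addw_u8(a, b, 8);   /* .addwrap: wraps modulo 2^8        */
    uint8_t s = zig_adds_u8(a, b, 8);   /* .add_sat: clamps to 255 (u8 max)  */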
@@ -2192,20 +2243,20 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.mul_add => try airMulAdd(f, inst),
- .add_with_overflow => try airOverflow(f, inst, "add", .range),
- .sub_with_overflow => try airOverflow(f, inst, "sub", .range),
- .mul_with_overflow => try airOverflow(f, inst, "mul", .range),
- .shl_with_overflow => try airOverflow(f, inst, "shl", .bits),
+ .add_with_overflow => try airOverflow(f, inst, "add", .Bits),
+ .sub_with_overflow => try airOverflow(f, inst, "sub", .Bits),
+ .mul_with_overflow => try airOverflow(f, inst, "mul", .Bits),
+ .shl_with_overflow => try airOverflow(f, inst, "shl", .Bits),
.min => try airMinMax(f, inst, '<'),
.max => try airMinMax(f, inst, '>'),
.slice => try airSlice(f, inst),
- .cmp_gt => try airBinOp(f, inst, ">"),
- .cmp_gte => try airBinOp(f, inst, ">="),
- .cmp_lt => try airBinOp(f, inst, "<"),
- .cmp_lte => try airBinOp(f, inst, "<="),
+ .cmp_gt => try airCmpOp(f, inst, ">"),
+ .cmp_gte => try airCmpOp(f, inst, ">="),
+ .cmp_lt => try airCmpOp(f, inst, "<"),
+ .cmp_lte => try airCmpOp(f, inst, "<="),
.cmp_eq => try airEquality(f, inst, "((", "=="),
.cmp_neq => try airEquality(f, inst, "!((", "!="),
@@ -2214,12 +2265,13 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.cmp_lt_errors_len => return f.fail("TODO: C backend: implement cmp_lt_errors_len", .{}),
// bool_and and bool_or are non-short-circuit operations
- .bool_and, .bit_and => try airBinOp(f, inst, "&"),
- .bool_or, .bit_or => try airBinOp(f, inst, "|"),
- .xor => try airBinOp(f, inst, "^"),
- .shr, .shr_exact => try airBinOp(f, inst, ">>"),
- .shl, .shl_exact => try airBinOp(f, inst, "<<"),
- .not => try airNot (f, inst),
+ .bool_and, .bit_and => try airBinOp(f, inst, "&", "and", .None),
+ .bool_or, .bit_or => try airBinOp(f, inst, "|", "or", .None),
+ .xor => try airBinOp(f, inst, "^", "xor", .None),
+ .shr, .shr_exact => try airBinBuiltinCall(f, inst, "shr", .None),
+            .shl              => try airBinBuiltinCall(f, inst, "shl", .None),
+ .shl_exact => try airBinOp(f, inst, "<<", "shl", .None),
+ .not => try airNot (f, inst),
.optional_payload => try airOptionalPayload(f, inst),
.optional_payload_ptr => try airOptionalPayloadPtr(f, inst),
@@ -2263,11 +2315,11 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.memcpy => try airMemcpy(f, inst),
.set_union_tag => try airSetUnionTag(f, inst),
.get_union_tag => try airGetUnionTag(f, inst),
- .clz => try airBuiltinCall(f, inst, "clz"),
- .ctz => try airBuiltinCall(f, inst, "ctz"),
- .popcount => try airBuiltinCall(f, inst, "popcount"),
- .byte_swap => try airBuiltinCall(f, inst, "byte_swap"),
- .bit_reverse => try airBuiltinCall(f, inst, "bit_reverse"),
+ .clz => try airUnBuiltinCall(f, inst, "clz", .Bits),
+ .ctz => try airUnBuiltinCall(f, inst, "ctz", .Bits),
+ .popcount => try airUnBuiltinCall(f, inst, "popcount", .Bits),
+ .byte_swap => try airUnBuiltinCall(f, inst, "byte_swap", .Bits),
+ .bit_reverse => try airUnBuiltinCall(f, inst, "bit_reverse", .Bits),
.tag_name => try airTagName(f, inst),
.error_name => try airErrorName(f, inst),
.splat => try airSplat(f, inst),
@@ -2393,10 +2445,10 @@ fn airSliceField(f: *Function, inst: Air.Inst.Index, is_ptr: bool, field_name: [
const writer = f.object.writer();
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- if (is_ptr) try writer.writeByte('&');
- try f.writeCValue(writer, operand, .Other);
- try if (is_ptr) writer.writeAll("->") else writer.writeByte('.');
- try writer.writeAll(field_name);
+ if (is_ptr) {
+ try writer.writeByte('&');
+ try f.writeCValueDerefMember(writer, operand, .{ .identifier = field_name });
+ } else try f.writeCValueMember(writer, operand, .{ .identifier = field_name });
try writer.writeAll(";\n");
return local;
}
@@ -2752,138 +2804,7 @@ fn airStore(f: *Function, inst: Air.Inst.Index) !CValue {
return CValue.none;
}
-fn airWrapOp(f: *Function, inst: Air.Inst.Index, operator: []const u8, fn_name: []const u8) !CValue {
- if (f.liveness.isUnused(inst)) return CValue.none;
-
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const target = f.object.dg.module.getTarget();
- const int_info = inst_ty.intInfo(target);
- const bits = int_info.bits;
-
- // if it's an unsigned int with non-arbitrary bit size then we can just add
- if (toCIntBits(bits)) |c_bits| {
- if (int_info.signedness == .unsigned and bits == c_bits) {
- return try airBinOp(f, inst, operator);
- }
- }
-
- if (bits > 64) return f.fail("TODO: C backend: airWrapOp for large integers", .{});
-
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
- const w = f.object.writer();
-
- const local = try f.allocLocal(inst_ty, .Mut);
- try w.print(" = zig_{s}w_", .{fn_name});
-
- switch (inst_ty.tag()) {
- .isize => try w.writeAll("isize"),
- .c_short => try w.writeAll("short"),
- .c_int => try w.writeAll("int"),
- .c_long => try w.writeAll("long"),
- .c_longlong => try w.writeAll("longlong"),
- else => {
- const prefix_byte: u8 = signAbbrev(int_info.signedness);
- for ([_]u8{ 8, 16, 32, 64 }) |nbits| {
- if (bits <= nbits) {
- try w.print("{c}{d}", .{ prefix_byte, nbits });
- break;
- }
- } else {
- unreachable;
- }
- },
- }
-
- try w.writeByte('(');
- try f.writeCValue(w, lhs, .FunctionArgument);
- try w.writeAll(", ");
- try f.writeCValue(w, rhs, .FunctionArgument);
- {
- var arena = std.heap.ArenaAllocator.init(f.object.dg.module.gpa);
- defer arena.deinit();
-
- const expected_contents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(expected_contents)) =
- std.heap.stackFallback(@sizeOf(expected_contents), arena.allocator());
-
- if (int_info.signedness == .signed) {
- const min_val = try inst_ty.minInt(stack.get(), target);
- try w.print(", {}", .{try f.fmtIntLiteral(inst_ty, min_val)});
- }
-
- const max_val = try inst_ty.maxInt(stack.get(), target);
- try w.print(", {});", .{try f.fmtIntLiteral(inst_ty, max_val)});
- }
- try f.object.indent_writer.insertNewline();
-
- return local;
-}
-
-fn airSatOp(f: *Function, inst: Air.Inst.Index, fn_name: []const u8) !CValue {
- if (f.liveness.isUnused(inst)) return CValue.none;
-
- const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const inst_ty = f.air.typeOfIndex(inst);
- const target = f.object.dg.module.getTarget();
- const int_info = inst_ty.intInfo(target);
- const bits = int_info.bits;
-
- if (bits > 64) return f.object.dg.fail("TODO: C backend: airSatOp for large integers", .{});
-
- const lhs = try f.resolveInst(bin_op.lhs);
- const rhs = try f.resolveInst(bin_op.rhs);
- const w = f.object.writer();
-
- const local = try f.allocLocal(inst_ty, .Mut);
- try w.print(" = zig_{s}s_", .{fn_name});
-
- switch (inst_ty.tag()) {
- .isize => try w.writeAll("isize"),
- .c_short => try w.writeAll("short"),
- .c_int => try w.writeAll("int"),
- .c_long => try w.writeAll("long"),
- .c_longlong => try w.writeAll("longlong"),
- else => {
- const prefix_byte: u8 = signAbbrev(int_info.signedness);
- for ([_]u8{ 8, 16, 32, 64 }) |nbits| {
- if (bits <= nbits) {
- try w.print("{c}{d}", .{ prefix_byte, nbits });
- break;
- }
- } else {
- unreachable;
- }
- },
- }
-
- try w.writeByte('(');
- try f.writeCValue(w, lhs, .FunctionArgument);
- try w.writeAll(", ");
- try f.writeCValue(w, rhs, .FunctionArgument);
- {
- var arena = std.heap.ArenaAllocator.init(f.object.dg.module.gpa);
- defer arena.deinit();
-
- const expected_contents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(expected_contents)) =
- std.heap.stackFallback(@sizeOf(expected_contents), arena.allocator());
-
- if (int_info.signedness == .signed) {
- const min_val = try inst_ty.minInt(stack.get(), target);
- try w.print(", {}", .{try f.fmtIntLiteral(inst_ty, min_val)});
- }
-
- const max_val = try inst_ty.maxInt(stack.get(), target);
- try w.print(", {});", .{try f.fmtIntLiteral(inst_ty, max_val)});
- }
- try f.object.indent_writer.insertNewline();
-
- return local;
-}
-
-fn airOverflow(f: *Function, inst: Air.Inst.Index, fn_name: []const u8, kind: enum { range, bits }) !CValue {
+fn airOverflow(f: *Function, inst: Air.Inst.Index, operation: []const u8, info: BuiltinInfo) !CValue {
if (f.liveness.isUnused(inst))
return CValue.none;
@@ -2895,54 +2816,29 @@ fn airOverflow(f: *Function, inst: Air.Inst.Index, fn_name: []const u8, kind: en
const inst_ty = f.air.typeOfIndex(inst);
const scalar_ty = f.air.typeOf(bin_op.lhs).scalarType();
- const target = f.object.dg.module.getTarget();
- const int_info = scalar_ty.intInfo(target);
const w = f.object.writer();
- const c_bits = toCIntBits(int_info.bits) orelse
- return f.fail("TODO: C backend: implement integer arithmetic larger than 128 bits", .{});
const local = try f.allocLocal(inst_ty, .Mut);
try w.writeAll(";\n");
try f.writeCValue(w, local, .Other);
- try w.print(".field_1 = zig_{s}o_{c}{d}(", .{
- fn_name, signAbbrev(int_info.signedness), c_bits,
- });
+ try w.writeAll(".field_1 = zig_");
+ try w.writeAll(operation);
+ try w.writeAll("o_");
+ try f.renderTypeForBuiltinFnName(w, scalar_ty);
+ try w.writeAll("(&");
+ try f.writeCValueMember(w, local, .{ .identifier = "field_0" });
+ try w.writeAll(", ");
try f.writeCValue(w, lhs, .FunctionArgument);
try w.writeAll(", ");
try f.writeCValue(w, rhs, .FunctionArgument);
- try w.writeAll(", &");
- try f.writeCValue(w, local, .Other);
- try w.writeAll(".field_0, ");
- switch (kind) {
- .range => {
- var arena = std.heap.ArenaAllocator.init(f.object.dg.module.gpa);
- defer arena.deinit();
-
- const expected_contents = union { u: Value.Payload.U64, i: Value.Payload.I64 };
- var stack align(@alignOf(expected_contents)) =
- std.heap.stackFallback(@sizeOf(expected_contents), arena.allocator());
-
- if (int_info.signedness == .signed) {
- const min_val = try scalar_ty.minInt(stack.get(), target);
- try w.print("{}, ", .{try f.fmtIntLiteral(scalar_ty, min_val)});
- }
-
- const max_val = try scalar_ty.maxInt(stack.get(), target);
- try w.print("{});\n", .{try f.fmtIntLiteral(scalar_ty, max_val)});
- },
- .bits => {
- var bits_pl = Value.Payload.U64{ .base = .{ .tag = .int_u64 }, .data = int_info.bits };
- const bits_val = Value.initPayload(&bits_pl.base);
- try w.print("{x});\n", .{try f.fmtIntLiteral(Type.u8, bits_val)});
- },
- }
+ try f.renderBuiltinInfo(w, scalar_ty, info);
+ try w.writeAll(");\n");
return local;
}
fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst))
- return CValue.none;
+ if (f.liveness.isUnused(inst)) return CValue.none;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const op = try f.resolveInst(ty_op.operand);
@@ -2951,6 +2847,9 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst_ty, .Const);
+ const target = f.object.dg.module.getTarget();
+ if (inst_ty.bitSize(target) > 64) {}
+
try writer.writeAll(" = ");
try writer.writeByte(if (inst_ty.tag() == .bool) '!' else '~');
try f.writeCValue(writer, op, .Other);
@@ -2959,16 +2858,53 @@ fn airNot(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airBinOp(f: *Function, inst: Air.Inst.Index, operator: []const u8) !CValue {
- if (f.liveness.isUnused(inst))
- return CValue.none;
+fn airBinOp(
+ f: *Function,
+ inst: Air.Inst.Index,
+ operator: []const u8,
+ operation: []const u8,
+ info: BuiltinInfo,
+) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const target = f.object.dg.module.getTarget();
+ if (operand_ty.bitSize(target) > 64) return try airBinBuiltinCall(f, inst, operation, info);
+
+ const inst_ty = f.air.typeOfIndex(inst);
const lhs = try f.resolveInst(bin_op.lhs);
const rhs = try f.resolveInst(bin_op.rhs);
const writer = f.object.writer();
+ const local = try f.allocLocal(inst_ty, .Const);
+
+ try writer.writeAll(" = ");
+ try f.writeCValue(writer, lhs, .Other);
+ try writer.writeByte(' ');
+ try writer.writeAll(operator);
+ try writer.writeByte(' ');
+ try f.writeCValue(writer, rhs, .Other);
+ try writer.writeAll(";\n");
+
+ return local;
+}
+
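airBinOp now checks the operand width up front: at 64 bits or fewer it still emits the plain C operator, while wider operands are rerouted through airBinBuiltinCall with the given operation name. A sketch of the two resulting forms for `.add`:

    uint32_t a32 = 1, b32 = 2;
    uint32_t r32 = a32 + b32;   /* operands <= 64 bits: plain C operator                    */
    /* operands wider than 64 bits become a helper call instead, roughly
       zig_add_u128(a128, b128) (helper and 128-bit type names are assumptions) */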
+fn airCmpOp(f: *Function, inst: Air.Inst.Index, operator: []const u8) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const target = f.object.dg.module.getTarget();
+ if (operand_ty.bitSize(target) > 64) return try airCmpBuiltinCall(f, inst, operator);
+
const inst_ty = f.air.typeOfIndex(inst);
+ const lhs = try f.resolveInst(bin_op.lhs);
+ const rhs = try f.resolveInst(bin_op.rhs);
+
+ const writer = f.object.writer();
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
@@ -3301,27 +3237,19 @@ fn lowerTry(
const payload_has_bits = payload_ty.hasRuntimeBitsIgnoreComptime();
if (!err_union_ty.errorUnionSet().errorSetIsEmpty()) {
- err: {
- if (!payload_has_bits) {
- if (operand_is_ptr) {
- try writer.writeAll("if(*");
- } else {
- try writer.writeAll("if(");
- }
- try f.writeCValue(writer, err_union, .Other);
- try writer.writeByte(')');
- break :err;
- }
- if (operand_is_ptr or isByRef(err_union_ty)) {
- try writer.writeAll("if(");
+ try writer.writeAll("if (");
+ if (!payload_has_bits) {
+ if (operand_is_ptr)
+ try f.writeCValueDeref(writer, err_union)
+ else
try f.writeCValue(writer, err_union, .Other);
- try writer.writeAll("->error)");
- break :err;
- }
- try writer.writeAll("if(");
- try f.writeCValue(writer, err_union, .Other);
- try writer.writeAll(".error)");
+ } else {
+ if (operand_is_ptr or isByRef(err_union_ty))
+ try f.writeCValueDerefMember(writer, err_union, .{ .identifier = "error" })
+ else
+ try f.writeCValueMember(writer, err_union, .{ .identifier = "error" });
}
+ try writer.writeByte(')');
try genBody(f, body);
try f.object.indent_writer.insertNewline();
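The rewritten condition above reduces to one of four shapes, depending on whether the payload has runtime bits and whether the error union is reached through a pointer. A compilable sketch, assuming the 2-byte `anyerror` representation and an error-union struct keeping the `error`/`payload` field names (the struct name is hypothetical):

    #include <stdint.h>
    struct zig_E_u32 { uint16_t error; uint32_t payload; };
    static void lower_try_shapes(uint16_t t0, uint16_t *p0,
                                 struct zig_E_u32 t1, struct zig_E_u32 *p1) {
        if (t0) {}        /* zero-bit payload, operand by value      */
        if (*p0) {}       /* zero-bit payload, operand is a pointer  */
        if (t1.error) {}  /* payload present, by-value error union   */
        if (p1->error) {} /* payload present, by-ref or pointer      */
    }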
@@ -3342,20 +3270,17 @@ fn lowerTry(
try writer.writeAll("memcpy(");
try f.writeCValue(writer, local, .FunctionArgument);
try writer.writeAll(", ");
- try f.writeCValue(writer, err_union, .Other);
- try writer.writeAll(".payload, sizeof(");
+ try f.writeCValueMember(writer, err_union, .{ .identifier = "payload" });
+ try writer.writeAll(", sizeof(");
try f.renderTypecast(writer, payload_ty);
try writer.writeAll("));\n");
} else {
+ try writer.writeAll(" = ");
if (operand_is_ptr or isByRef(payload_ty)) {
- try writer.writeAll(" = &");
- try f.writeCValue(writer, err_union, .Other);
- try writer.writeAll("->payload;\n");
- } else {
- try writer.writeAll(" = ");
- try f.writeCValue(writer, err_union, .Other);
- try writer.writeAll(".payload;\n");
- }
+ try writer.writeByte('&');
+ try f.writeCValueDerefMember(writer, err_union, .{ .identifier = "payload" });
+ } else try f.writeCValueMember(writer, err_union, .{ .identifier = "payload" });
+ try writer.writeAll(";\n");
}
return local;
}
@@ -3415,8 +3340,8 @@ fn airBitcast(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airBreakpoint(f: *Function) !CValue {
- try f.object.writer().writeAll("zig_breakpoint();\n");
+fn airBreakpoint(writer: anytype) !CValue {
+ try writer.writeAll("zig_breakpoint();\n");
return CValue.none;
}
@@ -3463,9 +3388,12 @@ fn airLoop(f: *Function, inst: Air.Inst.Index) !CValue {
const ty_pl = f.air.instructions.items(.data)[inst].ty_pl;
const loop = f.air.extraData(Air.Block, ty_pl.payload);
const body = f.air.extra[loop.end..][0..loop.data.body_len];
- try f.object.writer().writeAll("while (true) ");
+ const writer = f.object.writer();
+ try writer.writeAll("while (");
+ try f.object.dg.renderValue(writer, Type.bool, Value.@"true", .Other);
+ try writer.writeAll(") ");
try genBody(f, body);
- try f.object.indent_writer.insertNewline();
+ try writer.writeByte('\n');
return CValue.none;
}
@@ -3496,7 +3424,11 @@ fn airSwitchBr(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
try writer.writeAll("switch (");
- if (condition_ty.tag() == .bool) try writer.writeAll("(int)");
+ if (condition_ty.tag() == .bool) {
+ try writer.writeByte('(');
+ try f.renderTypecast(writer, Type.u1);
+ try writer.writeByte(')');
+ }
try f.writeCValue(writer, condition, .Other);
try writer.writeAll(") {");
f.object.indent_writer.pushIndent();
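Previously a bool switch condition was cast with a literal `(int)`; it is now cast through `renderTypecast` of `u1`, so the cast tracks however the backend lowers a one-bit integer. Roughly, assuming `u1` lowers to `uint8_t`:

    switch ((uint8_t)cond) {   /* was: switch ((int)cond) { */
    }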
@@ -3751,16 +3683,22 @@ fn airIsNull(
var payload_buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&payload_buf);
- if (!payload_ty.hasRuntimeBitsIgnoreComptime()) {
- try writer.print(" {s} true;\n", .{operator});
- } else if (operand_ty.isPtrLikeOptional()) {
+ const rhs = if (!payload_ty.hasRuntimeBitsIgnoreComptime())
+ TypedValue{ .ty = Type.bool, .val = Value.@"true" }
+ else if (operand_ty.isPtrLikeOptional())
// operand is a regular pointer, test `operand !=/== NULL`
- try writer.print(" {s} NULL;\n", .{operator});
- } else if (payload_ty.zigTypeTag() == .ErrorSet) {
- try writer.print(" {s} 0;\n", .{operator});
- } else {
- try writer.print(".is_null {s} true;\n", .{operator});
- }
+ TypedValue{ .ty = operand_ty, .val = Value.@"null" }
+ else if (payload_ty.zigTypeTag() == .ErrorSet)
+ TypedValue{ .ty = payload_ty, .val = Value.zero }
+ else rhs: {
+ try writer.writeAll(".is_null");
+ break :rhs TypedValue{ .ty = Type.bool, .val = Value.@"true" };
+ };
+ try writer.writeByte(' ');
+ try writer.writeAll(operator);
+ try writer.writeByte(' ');
+ try f.object.dg.renderValue(writer, rhs.ty, rhs.val, .Other);
+ try writer.writeAll(";\n");
return local;
}
@@ -3812,9 +3750,9 @@ fn airOptionalPayloadPtr(f: *Function, inst: Air.Inst.Index) !CValue {
}
const local = try f.allocLocal(inst_ty, .Const);
- try writer.writeAll(" = &(");
- try f.writeCValue(writer, operand, .Other);
- try writer.writeAll(")->payload;\n");
+ try writer.writeAll(" = &");
+ try f.writeCValueDerefMember(writer, operand, .{ .identifier = "payload" });
+ try writer.writeAll(";\n");
return local;
}
@@ -3833,7 +3771,9 @@ fn airOptionalPayloadPtrSet(f: *Function, inst: Air.Inst.Index) !CValue {
}
try f.writeCValueDeref(writer, operand);
- try writer.writeAll(".is_null = false;\n");
+ try writer.writeAll(".is_null = ");
+ try f.object.dg.renderValue(writer, Type.bool, Value.@"false", .Initializer);
+ try writer.writeAll(";\n");
const inst_ty = f.air.typeOfIndex(inst);
const local = try f.allocLocal(inst_ty, .Const);
@@ -3974,44 +3914,30 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
/// *(E!T) -> E
/// Note that the result is never a pointer.
fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
- if (f.liveness.isUnused(inst))
- return CValue.none;
+ if (f.liveness.isUnused(inst)) return CValue.none;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const inst_ty = f.air.typeOfIndex(inst);
- const writer = f.object.writer();
const operand = try f.resolveInst(ty_op.operand);
const operand_ty = f.air.typeOf(ty_op.operand);
- if (operand_ty.zigTypeTag() == .Pointer) {
- const err_union_ty = operand_ty.childType();
- if (err_union_ty.errorUnionSet().errorSetIsEmpty()) {
- return CValue{ .bytes = "0" };
- }
- if (!err_union_ty.errorUnionPayload().hasRuntimeBits()) {
- return operand;
- }
- const local = try f.allocLocal(inst_ty, .Const);
- try writer.writeAll(" = *");
- try f.writeCValue(writer, operand, .Other);
- try writer.writeAll(";\n");
- return local;
- }
- if (operand_ty.errorUnionSet().errorSetIsEmpty()) {
- return CValue{ .bytes = "0" };
- }
- if (!operand_ty.errorUnionPayload().hasRuntimeBits()) {
- return operand;
- }
+ const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer;
+ const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
+ const error_ty = error_union_ty.errorUnionSet();
+ const payload_ty = error_union_ty.errorUnionPayload();
+ if (!payload_ty.hasRuntimeBits()) return operand;
+ const writer = f.object.writer();
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- if (operand_ty.zigTypeTag() == .Pointer) {
- try f.writeCValueDeref(writer, operand);
- } else {
- try f.writeCValue(writer, operand, .Other);
- }
- try writer.writeAll(".error;\n");
+ if (!error_ty.errorSetIsEmpty())
+ if (operand_is_ptr)
+ try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
+ else
+ try f.writeCValueMember(writer, operand, .{ .identifier = "error" })
+ else
+ try f.object.dg.renderValue(writer, error_ty, Value.zero, .Initializer);
+ try writer.writeAll(";\n");
return local;
}
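The consolidated airUnwrapErrUnionErr above handles pointer and by-value operands with the same member helpers, and renders a zero error value when the error set is empty (when the payload has no runtime bits it simply returns the operand unchanged). A sketch of the emitted assignments, with `anyerror` shown as `uint16_t` and the zero shown literally rather than through `renderValue`:

    uint16_t e0 = eu.error;   /* by-value error union, non-empty error set    */
    uint16_t e1 = p->error;   /* pointer to error union, non-empty error set  */
    uint16_t e2 = 0;          /* empty error set: a rendered zero value       */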
@@ -4020,26 +3946,23 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, is_ptr: bool) !CValu
return CValue.none;
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
- const writer = f.object.writer();
+ const inst_ty = f.air.typeOfIndex(inst);
const operand = try f.resolveInst(ty_op.operand);
const operand_ty = f.air.typeOf(ty_op.operand);
const operand_is_ptr = operand_ty.zigTypeTag() == .Pointer;
const error_union_ty = if (operand_is_ptr) operand_ty.childType() else operand_ty;
- if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) {
- return CValue.none;
- }
-
- const inst_ty = f.air.typeOfIndex(inst);
+ if (!error_union_ty.errorUnionPayload().hasRuntimeBits()) return CValue.none;
+ const writer = f.object.writer();
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
if (is_ptr) try writer.writeByte('&');
- try writer.writeByte('(');
- try f.writeCValue(writer, operand, .Other);
- try writer.writeByte(')');
- try if (operand_is_ptr) writer.writeAll("->") else writer.writeByte('.');
- try writer.writeAll("payload;\n");
+ if (operand_is_ptr)
+ try f.writeCValueDerefMember(writer, operand, .{ .identifier = "payload" })
+ else
+ try f.writeCValueMember(writer, operand, .{ .identifier = "payload" });
+ try writer.writeAll(";\n");
return local;
}
@@ -4060,7 +3983,9 @@ fn airWrapOptional(f: *Function, inst: Air.Inst.Index) !CValue {
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = { .payload = ");
try f.writeCValue(writer, operand, .Initializer);
- try writer.writeAll(", .is_null = false };\n");
+ try writer.writeAll(", .is_null = ");
+ try f.object.dg.renderValue(writer, Type.bool, Value.@"false", .Initializer);
+ try writer.writeAll(" };\n");
return local;
}
@@ -4070,13 +3995,11 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const ty_op = f.air.instructions.items(.data)[inst].ty_op;
const operand = try f.resolveInst(ty_op.operand);
- const err_un_ty = f.air.typeOfIndex(inst);
- const payload_ty = err_un_ty.errorUnionPayload();
- if (!payload_ty.hasRuntimeBits()) {
- return operand;
- }
+ const error_union_ty = f.air.typeOfIndex(inst);
+ const payload_ty = error_union_ty.errorUnionPayload();
+ if (!payload_ty.hasRuntimeBits()) return operand;
- const local = try f.allocLocal(err_un_ty, .Const);
+ const local = try f.allocLocal(error_union_ty, .Const);
try writer.writeAll(" = { .payload = ");
try f.writeCValue(writer, .{ .undef = payload_ty }, .Initializer);
try writer.writeAll(", .error = ");
@@ -4180,18 +4103,21 @@ fn airIsErr(f: *Function, inst: Air.Inst.Index, is_ptr: bool, operator: []const
try writer.writeAll(" = ");
- if (error_ty.errorSetIsEmpty()) {
- try writer.writeByte('0');
- } else {
- try f.writeCValue(writer, operand, .Other);
- if (payload_ty.hasRuntimeBits()) {
- try if (is_ptr) writer.writeAll("->") else writer.writeByte('.');
- try writer.writeAll("error");
- }
- }
+ if (!error_ty.errorSetIsEmpty())
+ if (payload_ty.hasRuntimeBits())
+ if (is_ptr)
+ try f.writeCValueDerefMember(writer, operand, .{ .identifier = "error" })
+ else
+ try f.writeCValueMember(writer, operand, .{ .identifier = "error" })
+ else
+ try f.writeCValue(writer, operand, .Other)
+ else
+ try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
try writer.writeByte(' ');
try writer.writeAll(operator);
- try writer.writeAll(" 0;\n");
+ try writer.writeByte(' ');
+ try f.object.dg.renderValue(writer, error_ty, Value.zero, .Other);
+ try writer.writeAll(";\n");
return local;
}
@@ -4258,55 +4184,74 @@ fn airPtrToInt(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airBuiltinCall(f: *Function, inst: Air.Inst.Index, fn_name: [*:0]const u8) !CValue {
+fn airUnBuiltinCall(
+ f: *Function,
+ inst: Air.Inst.Index,
+ operation: []const u8,
+ info: BuiltinInfo,
+) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
const inst_ty = f.air.typeOfIndex(inst);
- const local = try f.allocLocal(inst_ty, .Const);
const operand = f.air.instructions.items(.data)[inst].ty_op.operand;
const operand_ty = f.air.typeOf(operand);
- const target = f.object.dg.module.getTarget();
- const writer = f.object.writer();
-
- const int_info = operand_ty.intInfo(target);
- const c_bits = toCIntBits(int_info.bits) orelse
- return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- try writer.print(" = zig_{s}_", .{fn_name});
- try writer.print("{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits });
+ const local = try f.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+ try writer.writeAll(" = zig_");
+ try writer.writeAll(operation);
+ try writer.writeByte('_');
+ try f.renderTypeForBuiltinFnName(writer, operand_ty);
+ try writer.writeByte('(');
try f.writeCValue(writer, try f.resolveInst(operand), .FunctionArgument);
- try writer.print(", {d});\n", .{int_info.bits});
+ try f.renderBuiltinInfo(writer, operand_ty, info);
+ try writer.writeAll(");\n");
return local;
}
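airUnBuiltinCall now derives the helper suffix from the operand type via renderTypeForBuiltinFnName and appends whatever renderBuiltinInfo produces; with `.Bits` that is the operand's bit width, matching the argument the old code printed explicitly. A sketch for `@clz` on a u32 (helper name assumed to be provided by zig.h):

    uint8_t n = zig_clz_u32(x, 32);   /* result is a u6 in Zig, lowered to uint8_t */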
-fn airBinOpBuiltinCall(f: *Function, inst: Air.Inst.Index, fn_name: [*:0]const u8) !CValue {
+fn airBinBuiltinCall(
+ f: *Function,
+ inst: Air.Inst.Index,
+ operation: []const u8,
+ info: BuiltinInfo,
+) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
const inst_ty = f.air.typeOfIndex(inst);
- const local = try f.allocLocal(inst_ty, .Const);
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
- const lhs_ty = f.air.typeOf(bin_op.lhs);
- const target = f.object.dg.module.getTarget();
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+
+ const local = try f.allocLocal(inst_ty, .Const);
const writer = f.object.writer();
+ try writer.writeAll(" = zig_");
+ try writer.writeAll(operation);
+ try writer.writeByte('_');
+ try f.renderTypeForBuiltinFnName(writer, operand_ty);
+ try writer.writeByte('(');
+ try f.writeCValue(writer, try f.resolveInst(bin_op.lhs), .FunctionArgument);
+ try writer.writeAll(", ");
+ try f.writeCValue(writer, try f.resolveInst(bin_op.rhs), .FunctionArgument);
+ try f.renderBuiltinInfo(writer, operand_ty, info);
+ try writer.writeAll(");\n");
+ return local;
+}
- // For binary operations @TypeOf(lhs)==@TypeOf(rhs), so we only check one.
- if (lhs_ty.isInt()) {
- const int_info = lhs_ty.intInfo(target);
- const c_bits = toCIntBits(int_info.bits) orelse
- return f.fail("TODO: C backend: implement integer types larger than 128 bits", .{});
- try writer.print(" = zig_{s}_{c}{d}", .{ fn_name, signAbbrev(int_info.signedness), c_bits });
- } else if (lhs_ty.isRuntimeFloat()) {
- const c_bits = lhs_ty.floatBits(target);
- try writer.print(" = zig_{s}_f{d}", .{ fn_name, c_bits });
- } else {
- return f.fail("TODO: C backend: implement airBinOpBuiltinCall for type {s}", .{@tagName(lhs_ty.tag())});
- }
+fn airCmpBuiltinCall(f: *Function, inst: Air.Inst.Index, operator: []const u8) !CValue {
+ if (f.liveness.isUnused(inst)) return CValue.none;
+
+ const inst_ty = f.air.typeOfIndex(inst);
+ const bin_op = f.air.instructions.items(.data)[inst].bin_op;
+ const operand_ty = f.air.typeOf(bin_op.lhs);
+ const local = try f.allocLocal(inst_ty, .Const);
+ const writer = f.object.writer();
+ try writer.writeAll(" = zig_cmp_");
+ try f.renderTypeForBuiltinFnName(writer, operand_ty);
try writer.writeByte('(');
try f.writeCValue(writer, try f.resolveInst(bin_op.lhs), .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, try f.resolveInst(bin_op.rhs), .FunctionArgument);
- try writer.writeAll(");\n");
+ try writer.print(") {s} {};\n", .{ operator, try f.fmtIntLiteral(Type.initTag(.i8), Value.zero) });
return local;
}
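For operands wider than 64 bits, comparisons now go through a three-way `zig_cmp_*` helper whose result is compared against zero (the zero is actually formatted as an i8 literal through fmtIntLiteral). A sketch for `a < b` on u128 operands, assuming zig.h provides a `zig_cmp_u128` returning negative, zero, or positive:

    bool lt = zig_cmp_u128(a, b) < 0;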
@@ -4325,7 +4270,11 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.writeAll(" = ");
if (is_struct) try writer.writeAll("{ .payload = ");
try f.writeCValue(writer, expected_value, .Initializer);
- if (is_struct) try writer.writeAll(", .is_null = false }");
+ if (is_struct) {
+ try writer.writeAll(", .is_null = ");
+ try f.object.dg.renderValue(writer, Type.bool, Value.@"false", .Initializer);
+ try writer.writeAll(" }");
+ }
try writer.writeAll(";\n");
if (is_struct) {
@@ -4341,10 +4290,10 @@ fn airCmpxchg(f: *Function, inst: Air.Inst.Index, flavor: [*:0]const u8) !CValue
try writer.writeAll(" *)");
try f.writeCValue(writer, ptr, .Other);
try writer.writeAll(", ");
- try f.writeCValue(writer, local, .FunctionArgument);
- if (is_struct) {
- try writer.writeAll(".payload");
- }
+ if (is_struct)
+ try f.writeCValueMember(writer, local, .{ .identifier = "payload" })
+ else
+ try f.writeCValue(writer, local, .FunctionArgument);
try writer.writeAll(", ");
try f.writeCValue(writer, new_value, .FunctionArgument);
try writer.writeAll(", ");
@@ -4537,8 +4486,6 @@ fn airTagName(f: *Function, inst: Air.Inst.Index) !CValue {
try f.writeCValue(writer, operand, .Other);
try writer.writeAll(");\n");
- try f.object.dg.fwd_decl.writer().writeAll("// This is where the fwd decl for tagName ended up\n");
-
return local;
}
@@ -4804,7 +4751,7 @@ fn airNeg(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
}
-fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, fn_name: []const u8) !CValue {
+fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
@@ -4812,14 +4759,14 @@ fn airUnFloatOp(f: *Function, inst: Air.Inst.Index, fn_name: []const u8) !CValue
const operand = try f.resolveInst(un_op);
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try f.renderFloatFnName(fn_name, inst_ty);
+ try f.renderFloatFnName(writer, operation, inst_ty);
try writer.writeByte('(');
try f.writeCValue(writer, operand, .FunctionArgument);
try writer.writeAll(");\n");
return local;
}
-fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, fn_name: []const u8) !CValue {
+fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, operation: []const u8) !CValue {
if (f.liveness.isUnused(inst)) return CValue.none;
const bin_op = f.air.instructions.items(.data)[inst].bin_op;
const writer = f.object.writer();
@@ -4828,7 +4775,7 @@ fn airBinFloatOp(f: *Function, inst: Air.Inst.Index, fn_name: []const u8) !CValu
const rhs = try f.resolveInst(bin_op.rhs);
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try f.renderFloatFnName(fn_name, inst_ty);
+ try f.renderFloatFnName(writer, operation, inst_ty);
try writer.writeByte('(');
try f.writeCValue(writer, lhs, .FunctionArgument);
try writer.writeAll(", ");
@@ -4848,7 +4795,7 @@ fn airMulAdd(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = ");
- try f.renderFloatFnName("fma", inst_ty);
+ try f.renderFloatFnName(writer, "fma", inst_ty);
try writer.writeByte('(');
try f.writeCValue(writer, mulend1, .FunctionArgument);
try writer.writeAll(", ");
@@ -5010,8 +4957,7 @@ fn formatIntLiteral(
};
undef_limbs: [limbs_count_128]Limb,
- str: [worst_case_int.sizeInBaseUpperBound(base)]u8,
- limbs_limbs: [expected_needed_limbs_count]Limb,
+ wrap_limbs: [limbs_count_128]Limb,
};
var stack align(@alignOf(expected_contents)) =
std.heap.stackFallback(@sizeOf(expected_contents), data.mod.gpa);
@@ -5037,35 +4983,89 @@ fn formatIntLiteral(
} else data.val.toBigInt(&int_buf, target);
assert(int.fitsInTwosComp(int_info.signedness, int_info.bits));
- const limbs_count_64 = @divExact(64, @bitSizeOf(Limb));
const c_bits = toCIntBits(int_info.bits) orelse unreachable;
- if (c_bits == 128) {
- // Clang and GCC don't support 128-bit integer constants but
- // will hopefully unfold them if we construct one manually.
- //std.debug.todo("128-bit is unimplemented");
- try writer.writeByte('(');
- if (int_info.signedness == .signed) {
- try writer.writeAll("(int128_t)");
- if (!int.positive) try writer.writeByte('-');
+ var one_limbs: [BigInt.calcLimbLen(1)]Limb = undefined;
+ const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
+
+ const wrap_limbs = try allocator.alloc(Limb, BigInt.calcTwosCompLimbCount(c_bits));
+ defer allocator.free(wrap_limbs);
+ var wrap = BigInt.Mutable{ .limbs = wrap_limbs, .len = undefined, .positive = undefined };
+ if (wrap.addWrap(int, one, int_info.signedness, c_bits) or
+ int_info.signedness == .signed and wrap.subWrap(int, one, int_info.signedness, c_bits))
+ {
+ const abbrev = switch (data.ty.tag()) {
+ .c_short, .c_ushort => "SHRT",
+ .c_int, .c_uint => "INT",
+ .c_long, .c_ulong => "LONG",
+ .c_longlong, .c_ulonglong => "LLONG",
+ .isize, .usize => "INTPTR",
+ else => return writer.print("zig_{s}Int_{c}{d}", .{
+ if (int.positive) "max" else "min", signAbbrev(int_info.signedness), c_bits,
+ }),
+ };
+ if (int_info.signedness == .unsigned) try writer.writeByte('U');
+ return writer.print("{s}_{s}", .{ abbrev, if (int.positive) "MAX" else "MIN" });
+ }
+
+ if (!int.positive) try writer.writeByte('-');
+ switch (data.ty.tag()) {
+ .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {},
+ else => try writer.print("zig_as_{c}{d}(", .{ signAbbrev(int_info.signedness), c_bits }),
+ }
+
+ const limbs_count_64 = @divExact(64, @bitSizeOf(Limb));
+ if (c_bits <= 64) {
+ var base: u8 = undefined;
+ var case: std.fmt.Case = undefined;
+ switch (fmt.len) {
+ 0 => base = 10,
+ 1 => switch (fmt[0]) {
+ 'b' => {
+ base = 2;
+ try writer.writeAll("0b");
+ },
+ 'o' => {
+ base = 8;
+ try writer.writeByte('0');
+ },
+ 'd' => base = 10,
+ 'x' => {
+ base = 16;
+ case = .lower;
+ try writer.writeAll("0x");
+ },
+ 'X' => {
+ base = 16;
+ case = .upper;
+ try writer.writeAll("0x");
+ },
+ else => @compileError("Invalid fmt: " ++ fmt),
+ },
+ else => @compileError("Invalid fmt: " ++ fmt),
}
+ var str: [64]u8 = undefined;
+ var limbs_buf: [BigInt.calcToStringLimbsBufferLen(limbs_count_64, 10)]Limb = undefined;
+ try writer.writeAll(str[0..int.abs().toString(&str, base, case, &limbs_buf)]);
+ } else {
+ assert(c_bits == 128);
const split = std.math.min(int.limbs.len, limbs_count_64);
+
var upper_pl = Value.Payload.BigInt{
.base = .{ .tag = .int_big_positive },
.data = int.limbs[split..],
};
- const have_upper = !upper_pl.asBigInt().eqZero();
- if (have_upper) try writer.writeByte('(');
- if (have_upper or !int.positive) try writer.writeAll("(uint128_t)");
- if (have_upper) {
- const upper_val = Value.initPayload(&upper_pl.base);
- try formatIntLiteral(.{
- .ty = Type.u64,
- .val = upper_val,
- .mod = data.mod,
- }, fmt, options, writer);
- try writer.writeAll("<<64|");
- }
+ const upper_val = Value.initPayload(&upper_pl.base);
+ try formatIntLiteral(.{
+ .ty = switch (int_info.signedness) {
+ .unsigned => Type.u64,
+ .signed => Type.i64,
+ },
+ .val = upper_val,
+ .mod = data.mod,
+ }, fmt, options, writer);
+
+ try writer.writeAll(", ");
var lower_pl = Value.Payload.BigInt{
.base = .{ .tag = .int_big_positive },
@@ -5078,74 +5078,9 @@ fn formatIntLiteral(
.mod = data.mod,
}, fmt, options, writer);
- if (have_upper) try writer.writeByte(')');
return writer.writeByte(')');
}
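Two consequences of the rewritten formatIntLiteral are worth spelling out: a type's extreme values now print as limit macros (the standard `*_MAX`/`*_MIN` names for C types, `zig_maxInt_*`/`zig_minInt_*` otherwise), and 128-bit constants are built from two 64-bit halves through a `zig_as_*128` constructor instead of shift-and-or expressions. Roughly:

    unsigned int a = UINT_MAX;   /* maxInt(c_uint); limits.h comes in via zig.h              */
    /* minInt(i128)  ->  zig_minInt_i128                       (zig.h helper, assumed)       */
    /* other u128s   ->  zig_as_u128(zig_as_u64(<upper half>), zig_as_u64(<lower half>))     */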
- assert(c_bits <= 64);
- var one_limbs: [BigInt.calcLimbLen(1)]Limb = undefined;
- const one = BigInt.Mutable.init(&one_limbs, 1).toConst();
-
- var wrap_limbs: [BigInt.calcTwosCompLimbCount(64)]Limb = undefined;
- var wrap = BigInt.Mutable{ .limbs = &wrap_limbs, .len = undefined, .positive = undefined };
- if (wrap.addWrap(int, one, int_info.signedness, c_bits) or
- int_info.signedness == .signed and wrap.subWrap(int, one, int_info.signedness, c_bits))
- {
- if (int_info.signedness == .unsigned) try writer.writeByte('U');
- switch (data.ty.tag()) {
- .c_short, .c_ushort => try writer.writeAll("SHRT"),
- .c_int, .c_uint => try writer.writeAll("INT"),
- .c_long, .c_ulong => try writer.writeAll("LONG"),
- .c_longlong, .c_ulonglong => try writer.writeAll("LLONG"),
- .isize, .usize => try writer.writeAll("INTPTR"),
- else => try writer.print("INT{d}", .{c_bits}),
- }
- try writer.writeAll(if (int.positive) "_MAX" else "_MIN");
- return;
- }
-
- if (!int.positive) try writer.writeByte('-');
- switch (data.ty.tag()) {
- .c_short, .c_ushort, .c_int, .c_uint, .c_long, .c_ulong, .c_longlong, .c_ulonglong => {},
- else => {
- if (int_info.signedness == .unsigned) try writer.writeByte('U');
- try writer.print("INT{d}_C(", .{c_bits});
- },
- }
-
- var base: u8 = undefined;
- var case: std.fmt.Case = undefined;
- switch (fmt.len) {
- 0 => base = 10,
- 1 => switch (fmt[0]) {
- 'b' => {
- base = 2;
- try writer.writeAll("0b");
- },
- 'o' => {
- base = 8;
- try writer.writeByte('0');
- },
- 'd' => base = 10,
- 'x' => {
- base = 16;
- case = .lower;
- try writer.writeAll("0x");
- },
- 'X' => {
- base = 16;
- case = .upper;
- try writer.writeAll("0x");
- },
- else => @compileError("Invalid fmt: " ++ fmt),
- },
- else => @compileError("Invalid fmt: " ++ fmt),
- }
-
- var str: [64]u8 = undefined;
- var limbs_buf: [BigInt.calcToStringLimbsBufferLen(limbs_count_64, 10)]Limb = undefined;
- try writer.writeAll(str[0..int.abs().toString(&str, base, case, &limbs_buf)]);
-
switch (data.ty.tag()) {
.c_short, .c_ushort, .c_int => {},
.c_uint => try writer.writeAll("u"),
test/behavior/bugs/2114.zig
@@ -12,7 +12,6 @@ test "fixed" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testCtz();
comptime try testCtz();
test/behavior/align.zig
@@ -393,8 +393,6 @@ test "function callconv expression depends on generic parameter" {
}
test "runtime-known array index has best alignment possible" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
// take full advantage of over-alignment
var array align(4) = [_]u8{ 1, 2, 3, 4 };
comptime assert(@TypeOf(&array[0]) == *align(4) u8);
test/behavior/cast.zig
@@ -119,7 +119,6 @@ test "@intToFloat(f80)" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest(comptime Int: type) !void {
@@ -1157,7 +1156,6 @@ fn castToOptionalSlice() ?[]const u8 {
test "cast u128 to f128 and back" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
test/behavior/floatop.zig
@@ -54,7 +54,6 @@ fn testFloatComparisons() !void {
test "different sized float comparisons" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -303,7 +302,6 @@ test "@log" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
comptime try testLog();
try testLog();
@@ -543,7 +541,6 @@ fn testTrunc() !void {
test "negation f16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
test/behavior/int128.zig
@@ -43,7 +43,6 @@ test "int128" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var buff: i128 = -1;
try expect(buff < 0 and (buff + 1) == 0);
test/behavior/math.zig
@@ -452,7 +452,6 @@ fn testDivision() !void {
}
test "division half-precision floats" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
@@ -685,7 +684,6 @@ test "basic @mulWithOverflow" {
// TODO migrate to this for all backends once they handle more cases
test "extensive @mulWithOverflow" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
{
@@ -835,7 +833,6 @@ test "extensive @mulWithOverflow" {
}
test "@mulWithOverflow bitsize > 32" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
@@ -923,8 +920,6 @@ test "@subWithOverflow" {
}
test "@shlWithOverflow" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
-
{
var result: u4 = undefined;
var a: u4 = 2;
@@ -1274,7 +1269,6 @@ test "@sqrt" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testSqrt(f64, 12.0);
comptime try testSqrt(f64, 12.0);
@@ -1339,7 +1333,6 @@ test "@floor" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testFloor(f64, 12.0);
comptime try testFloor(f64, 12.0);
@@ -1388,7 +1381,6 @@ test "@ceil" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testCeil(f64, 12.0);
comptime try testCeil(f64, 12.0);
@@ -1437,7 +1429,6 @@ test "@trunc" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testTrunc(f64, 12.0);
comptime try testTrunc(f64, 12.0);
@@ -1500,7 +1491,6 @@ test "@round" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
try testRound(f64, 12.0);
comptime try testRound(f64, 12.0);
test/behavior/muladd.zig
@@ -27,7 +27,6 @@ fn testMulAdd() !void {
test "@mulAdd f16" {
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
test/behavior/packed-struct.zig
@@ -448,7 +448,6 @@ test "optional pointer in packed struct" {
}
test "nested packed struct field access test" {
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
test/behavior/ptrcast.zig
@@ -133,7 +133,6 @@ test "lower reinterpreted comptime field ptr (with under-aligned fields)" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO: CBE does not yet support under-aligned fields
// Test lowering a field ptr
comptime var bytes align(2) = [_]u8{ 1, 2, 3, 4, 5, 6 };
test/behavior/saturating_arithmetic.zig
@@ -54,7 +54,6 @@ test "saturating add 128bit" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
try testSatAdd(i128, maxInt(i128), -maxInt(i128), 0);
@@ -78,7 +77,6 @@ test "saturating subtraction" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@@ -124,7 +122,6 @@ test "saturating subtraction 128bit" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
@@ -151,7 +148,6 @@ test "saturating multiplication" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage1 and builtin.cpu.arch == .wasm32) {
// https://github.com/ziglang/zig/issues/9660
@@ -199,7 +195,6 @@ test "saturating shift-left" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
const S = struct {
fn doTheTest() !void {
test/behavior/sizeof_and_typeof.zig
@@ -19,7 +19,6 @@ test "@sizeOf on compile-time types" {
test "@TypeOf() with multiple arguments" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
{
var var_1: u32 = undefined;
var var_2: u8 = undefined;
test/behavior/widening.zig
@@ -40,7 +40,6 @@ test "float widening" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
var a: f16 = 12.34;
var b: f32 = a;
@@ -60,7 +59,6 @@ test "float widening f16 to f128" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
- if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
// TODO https://github.com/ziglang/zig/issues/3282
if (builtin.cpu.arch == .aarch64) return error.SkipZigTest;